author    thegeorg <thegeorg@yandex-team.ru>    2022-05-10 22:16:03 +0300
committer thegeorg <thegeorg@yandex-team.ru>    2022-05-10 22:16:03 +0300
commit    09c71d918d4d0b0ebf67e1ab41aa90ddf587a3f2 (patch)
tree      dd44d2cb68e2845c2d4c367b66893f3e043a6e8e /contrib
parent    5eb4a8a2d487411924e1d1b27c454223dcf35005 (diff)
download  ydb-09c71d918d4d0b0ebf67e1ab41aa90ddf587a3f2.tar.gz
Update contrib/restricted/aws/s2n to 1.3.12
ref:f8279d764b4c00974a63543a1364c91e2b81b7a6
Diffstat (limited to 'contrib')
-rw-r--r-- contrib/libs/hyperscan/runtime_avx2/CMakeLists.txt | 2
-rw-r--r-- contrib/libs/hyperscan/runtime_avx512/CMakeLists.txt | 2
-rw-r--r-- contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt | 2
-rw-r--r-- contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt | 2
-rw-r--r-- contrib/restricted/aws/s2n/.yandex_meta/devtools.copyrights.report | 84
-rw-r--r-- contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report | 119
-rw-r--r-- contrib/restricted/aws/s2n/.yandex_meta/licenses.list.txt | 883
-rw-r--r-- contrib/restricted/aws/s2n/CMakeLists.darwin.txt | 284
-rw-r--r-- contrib/restricted/aws/s2n/CMakeLists.linux.txt | 289
-rw-r--r-- contrib/restricted/aws/s2n/README.md | 110
-rw-r--r-- contrib/restricted/aws/s2n/VERSIONING.rst | 40
-rw-r--r-- contrib/restricted/aws/s2n/api/s2n.h | 1056
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_aes_gcm.c | 208
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_chacha20_poly1305.c | 76
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_3des.c | 20
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_aes.c | 28
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_certificate.c | 555
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_certificate.h | 14
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_cipher.c | 6
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_composite_cipher_aes_sha.c | 48
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_crypto.c | 35
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_crypto.h | 4
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_dhe.c | 144
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_drbg.c | 200
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_drbg.h | 20
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_ecc_evp.c | 197
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_ecdsa.c | 96
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_ecdsa.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_evp.c | 11
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_evp.h | 13
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_evp_signing.c | 154
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_evp_signing.h | 29
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_fips.c | 12
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_hash.c | 434
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_hash.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_hkdf.c | 53
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_hmac.c | 162
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_hmac.h | 1
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_libcrypto.c | 42
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_openssl.h | 11
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.c | 25
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.h | 3
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_pkey.c | 42
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_rsa.c | 36
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_rsa.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.c | 68
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.h | 3
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.c | 169
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.h | 4
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_sequence.c | 9
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_signature.h | 10
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_null.c | 2
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c | 16
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.c | 247
-rw-r--r-- contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.h | 19
-rw-r--r-- contrib/restricted/aws/s2n/error/s2n_errno.c | 35
-rw-r--r-- contrib/restricted/aws/s2n/error/s2n_errno.h | 66
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r1/aes_ctr_prf.c | 8
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r1/bike_r1_kem.c | 40
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r1/decode.c | 18
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r1/openssl_utils.c | 14
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.c | 6
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.h | 4
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c | 8
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c | 74
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c | 18
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c | 14
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c | 6
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h | 4
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/LICENSE | 202
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes.h | 62
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.c | 97
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.h | 43
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_defs.h | 91
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_r3_kem.c | 288
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/cleanup.h | 63
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.c | 280
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.h | 12
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx2.c | 173
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx512.c | 167
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_internal.h | 86
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_portable.c | 126
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/defs.h | 107
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.c | 10
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.h | 33
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x.h | 29
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_internal.h | 177
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_inv.c | 156
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx2.c | 188
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx512.c | 135
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_portable.c | 48
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul.c | 113
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx2.c | 109
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx512.c | 109
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_pclmul.c | 155
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_portable.c | 77
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c | 135
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_portable.c | 103
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.c | 170
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.h | 40
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx2.c | 123
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx512.c | 123
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_internal.h | 66
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_portable.c | 60
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/sha.h | 43
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/types.h | 120
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.c | 24
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.h | 139
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/bike_r3/x86_64_intrinsic.h | 132
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/indcpa.c | 2
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/kyber_90s_r2_kem.c | 10
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/ntt.h | 4
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r2/indcpa.c | 2
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r2/kyber_r2_kem.c | 10
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r2/ntt.h | 4
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c | 1284
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SnP_avx2.h | 63
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-SIMD256-config_avx2.h | 3
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-align_avx2.h | 31
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h | 139
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_align_avx2.h | 19
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_basemul_avx2.S | 105
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.c | 104
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.h | 11
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.c | 137
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.h | 15
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.c | 122
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.h | 43
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.c (renamed from contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.c) | 141
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.h | 68
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.c | 210
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.h | 70
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fq_avx2.S | 122
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.c | 323
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.h | 15
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.c | 363
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.h | 25
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_invntt_avx2.S | 255
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_kem.c | 158
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.c | 122
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.h | 19
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.S | 218
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.h | 28
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_params.h | 31
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.c | 300
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.h | 61
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.c | 453
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.h | 80
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.c | 186
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.h | 40
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.c | 227
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.h | 39
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.c | 60
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.h | 15
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce_avx2.h | 13
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.c | 420
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.h | 14
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_shuffle_avx2.S | 272
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric-shake.c | 49
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric.h | 17
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/s2n_pq.c | 232
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/s2n_pq.h | 21
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/s2n_pq_random.c | 8
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r1/P503_internal_r1.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r1/fips202_r1.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r1/sidh_r1.c | 2
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r1/sike_r1_kem.c | 22
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434.c | 117
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434_api.h | 70
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434_internal.h | 225
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/config.h | 218
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/ec_isogeny.c | 313
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.h | 14
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/fp.c | 241
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/fpx.c | 387
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/sidh.c | 286
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/sike_r2_kem.c | 120
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r2/sikep434r2_fp_x64_asm.S | 962
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.c | 146
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.h | 181
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_api.h | 78
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.c | 348
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.h | 46
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.c | 417
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.h | 23
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.c | 297
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.h | 39
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.S | 1054
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.h | 38
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.c | 478
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.h | 65
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_kem.c | 112
-rw-r--r-- contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_sidh.c | 310
-rw-r--r-- contrib/restricted/aws/s2n/stuffer/s2n_stuffer.c | 201
-rw-r--r-- contrib/restricted/aws/s2n/stuffer/s2n_stuffer.h | 3
-rw-r--r-- contrib/restricted/aws/s2n/stuffer/s2n_stuffer_base64.c | 30
-rw-r--r-- contrib/restricted/aws/s2n/stuffer/s2n_stuffer_file.c | 39
-rw-r--r-- contrib/restricted/aws/s2n/stuffer/s2n_stuffer_network_order.c | 63
-rw-r--r-- contrib/restricted/aws/s2n/stuffer/s2n_stuffer_pem.c | 44
-rw-r--r-- contrib/restricted/aws/s2n/stuffer/s2n_stuffer_text.c | 102
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_alpn.c | 60
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_early_data_indication.c | 186
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_ems.c | 63
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_key_share.c | 467
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_max_frag_len.c | 24
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_pq_kem.c | 14
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_psk.c | 321
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_renegotiation_info.c | 2
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_server_name.c | 37
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_session_ticket.c | 12
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_status_request.c | 8
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_groups.c | 78
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_versions.c | 25
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_cookie.c | 28
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_early_data_indication.h | 22
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_ec_point_format.c | 4
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_ems.h (renamed from contrib/restricted/aws/s2n/utils/s2n_str.h) | 6
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.c | 70
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.c | 83
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.h | 4
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type_lists.c | 42
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.c | 34
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.h | 3
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_nst_early_data_indication.c | 80
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.c | 20
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_quic_transport_params.c | 27
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_alpn.c | 20
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_certificate_status.c | 44
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_early_data_indication.c | 106
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_ems.c | 74
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_key_share.c | 257
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_max_fragment_length.c | 34
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_psk.c | 24
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_renegotiation_info.c | 6
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_sct_list.c | 14
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_server_name.c | 4
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_session_ticket.c | 2
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_status_request.c | 2
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_server_supported_versions.c | 17
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.c | 16
-rw-r--r-- contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_aead.c | 107
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_alerts.c | 160
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_alerts.h | 78
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_async_pkey.c | 445
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_async_pkey.h | 22
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_auth_selection.c | 64
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_auth_selection.h | 1
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_cbc.c | 31
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_change_cipher_spec.c | 20
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.c | 635
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.h | 30
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_cipher_suites.c | 162
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_cipher_suites.h | 9
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_client_cert.c | 103
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_client_cert_verify.c | 72
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_client_finished.c | 36
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_client_hello.c | 440
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_client_hello.h | 15
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_client_hello_request.c | 40
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_client_key_exchange.c | 130
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_config.c | 391
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_config.h | 53
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_connection.c | 1215
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_connection.h | 143
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.c | 112
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.h | 36
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_crypto.h | 38
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_early_data.c | 435
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_early_data.h | 63
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_early_data_io.c | 275
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.c | 15
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.h | 3
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_encrypted_extensions.c | 12
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_establish_session.c | 13
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_handshake.c | 160
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_handshake.h | 123
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.c | 126
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.h | 44
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_handshake_io.c | 660
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_handshake_transcript.c | 69
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_handshake_type.c | 73
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_handshake_type.h | 90
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_internal.h | 57
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_kem.c | 328
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_kem.h | 62
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_kem_preferences.c | 64
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_kem_preferences.h | 3
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_kex.c | 156
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_key_log.c | 174
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_key_log.h | 27
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_key_update.c | 52
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_key_update.h | 2
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_ocsp_stapling.c | 2
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_post_handshake.c | 71
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_prf.c | 655
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_prf.h | 27
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.c | 141
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.h | 23
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_psk.c | 597
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_psk.h | 68
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_quic_support.c | 54
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_quic_support.h | 21
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_record.h | 53
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_record_read.c | 43
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_record_read_aead.c | 39
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_record_read_cbc.c | 54
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_record_read_composite.c | 38
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_record_read_stream.c | 38
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_record_write.c | 185
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_recv.c | 61
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_resume.c | 741
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_resume.h | 39
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_security_policies.c | 357
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_security_policies.h | 22
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_send.c | 40
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_server_cert.c | 24
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_server_cert_request.c | 59
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_server_extensions.c | 22
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_server_finished.c | 38
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_server_hello.c | 149
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_server_hello_retry.c | 76
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_server_key_exchange.c | 156
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_server_new_session_ticket.c | 357
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_shutdown.c | 23
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.c | 117
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.h | 6
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_signature_scheme.c | 18
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_signature_scheme.h | 3
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls.h | 6
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13.c | 81
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13.h | 11
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13_certificate_verify.c | 70
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.c | 357
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.h | 6
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.c | 328
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.h | 22
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.c | 626
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.h | 55
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls_digest_preferences.h | 38
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_tls_parameters.h | 69
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_x509_validator.c | 103
-rw-r--r-- contrib/restricted/aws/s2n/tls/s2n_x509_validator.h | 22
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_array.c | 111
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_array.h | 10
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_asn1_time.c | 15
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_blob.c | 83
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_blob.h | 27
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_ensure.c | 3
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_ensure.h | 74
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_fork_detection.c | 363
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_fork_detection.h | 28
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_init.c | 69
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_map.c | 85
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_mem.c | 109
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_random.c | 121
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_result.c | 12
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_result.h | 15
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_rfc5952.c | 8
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_safety.c | 110
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_safety.h | 347
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_safety_macros.h | 609
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_set.c | 58
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_set.h | 6
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_socket.c | 63
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_str.c | 49
-rw-r--r-- contrib/restricted/aws/s2n/utils/s2n_timer.c | 6
369 files changed, 32919 insertions, 10405 deletions
diff --git a/contrib/libs/hyperscan/runtime_avx2/CMakeLists.txt b/contrib/libs/hyperscan/runtime_avx2/CMakeLists.txt
index 6d4e1efcd9..6cd442db9c 100644
--- a/contrib/libs/hyperscan/runtime_avx2/CMakeLists.txt
+++ b/contrib/libs/hyperscan/runtime_avx2/CMakeLists.txt
@@ -19,6 +19,8 @@ target_compile_options(libs-hyperscan-runtime_avx2 PRIVATE
-DHAVE_AVX
-mavx2
-mfma
+ -mbmi
+ -mbmi2
-DHAVE_AVX2
-DCrc32c_ComputeBuf=avx2_Crc32c_ComputeBuf
-DblockInitSufPQ=avx2_blockInitSufPQ
diff --git a/contrib/libs/hyperscan/runtime_avx512/CMakeLists.txt b/contrib/libs/hyperscan/runtime_avx512/CMakeLists.txt
index 7ddf41f582..66964988d8 100644
--- a/contrib/libs/hyperscan/runtime_avx512/CMakeLists.txt
+++ b/contrib/libs/hyperscan/runtime_avx512/CMakeLists.txt
@@ -19,6 +19,8 @@ target_compile_options(libs-hyperscan-runtime_avx512 PRIVATE
-DHAVE_AVX
-mavx2
-mfma
+ -mbmi
+ -mbmi2
-DHAVE_AVX2
-mavx512f
-mavx512cd
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
index 971f0048c2..3728d92779 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
@@ -85,4 +85,6 @@ set_property(
COMPILE_OPTIONS
-mavx2
-mfma
+ -mbmi
+ -mbmi2
)
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
index 44689c6ee4..cbb6afb997 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
@@ -81,4 +81,6 @@ set_property(
COMPILE_OPTIONS
-mavx2
-mfma
+ -mbmi
+ -mbmi2
)
diff --git a/contrib/restricted/aws/s2n/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/s2n/.yandex_meta/devtools.copyrights.report
index c6657ea2b7..fb08469f30 100644
--- a/contrib/restricted/aws/s2n/.yandex_meta/devtools.copyrights.report
+++ b/contrib/restricted/aws/s2n/.yandex_meta/devtools.copyrights.report
@@ -48,6 +48,7 @@ BELONGS ya.make
crypto/s2n_cipher.c [2:2]
crypto/s2n_cipher.h [2:2]
crypto/s2n_composite_cipher_aes_sha.c [2:2]
+ crypto/s2n_crypto.c [2:2]
crypto/s2n_crypto.h [2:2]
crypto/s2n_dhe.c [2:2]
crypto/s2n_dhe.h [2:2]
@@ -59,6 +60,8 @@ BELONGS ya.make
crypto/s2n_ecdsa.h [2:2]
crypto/s2n_evp.c [2:2]
crypto/s2n_evp.h [2:2]
+ crypto/s2n_evp_signing.c [2:2]
+ crypto/s2n_evp_signing.h [2:2]
crypto/s2n_fips.c [2:2]
crypto/s2n_fips.h [2:2]
crypto/s2n_hash.c [2:2]
@@ -67,8 +70,10 @@ BELONGS ya.make
crypto/s2n_hkdf.h [2:2]
crypto/s2n_hmac.c [2:2]
crypto/s2n_hmac.h [2:2]
+ crypto/s2n_libcrypto.c [2:2]
crypto/s2n_openssl.h [2:2]
crypto/s2n_openssl_evp.h [2:2]
+ crypto/s2n_openssl_x509.c [2:2]
crypto/s2n_openssl_x509.h [2:2]
crypto/s2n_pkey.c [2:2]
crypto/s2n_pkey.h [2:2]
@@ -138,6 +143,45 @@ BELONGS ya.make
pq-crypto/bike_r2/types.h [1:1]
pq-crypto/bike_r2/utilities.c [1:1]
pq-crypto/bike_r2/utilities.h [1:1]
+ pq-crypto/bike_r3/aes.h [1:1]
+ pq-crypto/bike_r3/aes_ctr_prf.c [1:1]
+ pq-crypto/bike_r3/aes_ctr_prf.h [1:1]
+ pq-crypto/bike_r3/bike_defs.h [1:1]
+ pq-crypto/bike_r3/bike_r3_kem.c [1:1]
+ pq-crypto/bike_r3/cleanup.h [1:1]
+ pq-crypto/bike_r3/decode.c [1:1]
+ pq-crypto/bike_r3/decode.h [1:1]
+ pq-crypto/bike_r3/decode_avx2.c [1:1]
+ pq-crypto/bike_r3/decode_avx512.c [1:1]
+ pq-crypto/bike_r3/decode_internal.h [1:1]
+ pq-crypto/bike_r3/decode_portable.c [1:1]
+ pq-crypto/bike_r3/defs.h [1:1]
+ pq-crypto/bike_r3/error.c [1:1]
+ pq-crypto/bike_r3/error.h [1:1]
+ pq-crypto/bike_r3/gf2x.h [1:1]
+ pq-crypto/bike_r3/gf2x_internal.h [1:1]
+ pq-crypto/bike_r3/gf2x_inv.c [1:1]
+ pq-crypto/bike_r3/gf2x_ksqr_avx2.c [1:1]
+ pq-crypto/bike_r3/gf2x_ksqr_avx512.c [1:1]
+ pq-crypto/bike_r3/gf2x_ksqr_portable.c [1:1]
+ pq-crypto/bike_r3/gf2x_mul.c [1:1]
+ pq-crypto/bike_r3/gf2x_mul_avx2.c [1:1]
+ pq-crypto/bike_r3/gf2x_mul_avx512.c [1:1]
+ pq-crypto/bike_r3/gf2x_mul_base_pclmul.c [1:1]
+ pq-crypto/bike_r3/gf2x_mul_base_portable.c [1:1]
+ pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c [1:1]
+ pq-crypto/bike_r3/gf2x_mul_portable.c [1:1]
+ pq-crypto/bike_r3/sampling.c [1:1]
+ pq-crypto/bike_r3/sampling.h [1:1]
+ pq-crypto/bike_r3/sampling_avx2.c [1:1]
+ pq-crypto/bike_r3/sampling_avx512.c [1:1]
+ pq-crypto/bike_r3/sampling_internal.h [1:1]
+ pq-crypto/bike_r3/sampling_portable.c [1:1]
+ pq-crypto/bike_r3/sha.h [1:1]
+ pq-crypto/bike_r3/types.h [1:1]
+ pq-crypto/bike_r3/utilities.c [1:1]
+ pq-crypto/bike_r3/utilities.h [1:1]
+ pq-crypto/bike_r3/x86_64_intrinsic.h [1:1]
pq-crypto/s2n_pq.c [2:2]
pq-crypto/s2n_pq.h [2:2]
pq-crypto/s2n_pq_random.c [2:2]
@@ -151,6 +195,8 @@ BELONGS ya.make
stuffer/s2n_stuffer_text.c [2:2]
tls/extensions/s2n_client_alpn.c [2:2]
tls/extensions/s2n_client_alpn.h [2:2]
+ tls/extensions/s2n_client_early_data_indication.c [2:2]
+ tls/extensions/s2n_client_ems.c [2:2]
tls/extensions/s2n_client_key_share.c [2:2]
tls/extensions/s2n_client_key_share.h [2:2]
tls/extensions/s2n_client_max_frag_len.c [2:2]
@@ -177,8 +223,10 @@ BELONGS ya.make
tls/extensions/s2n_client_supported_versions.h [2:2]
tls/extensions/s2n_cookie.c [2:2]
tls/extensions/s2n_cookie.h [2:2]
+ tls/extensions/s2n_early_data_indication.h [2:2]
tls/extensions/s2n_ec_point_format.c [2:2]
tls/extensions/s2n_ec_point_format.h [2:2]
+ tls/extensions/s2n_ems.h [2:2]
tls/extensions/s2n_extension_list.c [2:2]
tls/extensions/s2n_extension_list.h [2:2]
tls/extensions/s2n_extension_type.c [2:2]
@@ -187,6 +235,7 @@ BELONGS ya.make
tls/extensions/s2n_extension_type_lists.h [2:2]
tls/extensions/s2n_key_share.c [2:2]
tls/extensions/s2n_key_share.h [2:2]
+ tls/extensions/s2n_nst_early_data_indication.c [2:2]
tls/extensions/s2n_psk_key_exchange_modes.c [2:2]
tls/extensions/s2n_psk_key_exchange_modes.h [2:2]
tls/extensions/s2n_quic_transport_params.c [2:2]
@@ -195,6 +244,8 @@ BELONGS ya.make
tls/extensions/s2n_server_alpn.h [2:2]
tls/extensions/s2n_server_certificate_status.c [2:2]
tls/extensions/s2n_server_certificate_status.h [2:2]
+ tls/extensions/s2n_server_early_data_indication.c [2:2]
+ tls/extensions/s2n_server_ems.c [2:2]
tls/extensions/s2n_server_key_share.c [2:2]
tls/extensions/s2n_server_key_share.h [2:2]
tls/extensions/s2n_server_max_fragment_length.c [2:2]
@@ -235,6 +286,7 @@ BELONGS ya.make
tls/s2n_client_finished.c [2:2]
tls/s2n_client_hello.c [2:2]
tls/s2n_client_hello.h [2:2]
+ tls/s2n_client_hello_request.c [2:2]
tls/s2n_client_key_exchange.c [2:2]
tls/s2n_client_key_exchange.h [2:2]
tls/s2n_config.c [2:2]
@@ -245,13 +297,21 @@ BELONGS ya.make
tls/s2n_connection_evp_digests.h [2:2]
tls/s2n_crypto.h [2:2]
tls/s2n_crypto_constants.h [2:2]
+ tls/s2n_early_data.c [2:2]
+ tls/s2n_early_data.h [2:2]
+ tls/s2n_early_data_io.c [2:2]
tls/s2n_ecc_preferences.c [2:2]
tls/s2n_ecc_preferences.h [2:2]
tls/s2n_encrypted_extensions.c [2:2]
tls/s2n_handshake.c [2:2]
tls/s2n_handshake.h [2:2]
+ tls/s2n_handshake_hashes.c [2:2]
+ tls/s2n_handshake_hashes.h [2:2]
tls/s2n_handshake_io.c [2:2]
tls/s2n_handshake_transcript.c [2:2]
+ tls/s2n_handshake_type.c [2:2]
+ tls/s2n_handshake_type.h [2:2]
+ tls/s2n_internal.h [2:2]
tls/s2n_kem.c [2:2]
tls/s2n_kem.h [2:2]
tls/s2n_kem_preferences.c [2:2]
@@ -259,6 +319,8 @@ BELONGS ya.make
tls/s2n_kex.c [2:2]
tls/s2n_kex.h [2:2]
tls/s2n_kex_data.h [2:2]
+ tls/s2n_key_log.c [2:2]
+ tls/s2n_key_log.h [2:2]
tls/s2n_key_update.c [2:2]
tls/s2n_key_update.h [2:2]
tls/s2n_ocsp_stapling.c [2:2]
@@ -267,6 +329,7 @@ BELONGS ya.make
tls/s2n_prf.c [2:2]
tls/s2n_prf.h [2:2]
tls/s2n_protocol_preferences.c [2:2]
+ tls/s2n_protocol_preferences.h [2:2]
tls/s2n_psk.c [2:2]
tls/s2n_psk.h [2:2]
tls/s2n_quic_support.c [2:2]
@@ -309,7 +372,10 @@ BELONGS ya.make
tls/s2n_tls13_certificate_verify.h [2:2]
tls/s2n_tls13_handshake.c [2:2]
tls/s2n_tls13_handshake.h [2:2]
- tls/s2n_tls_digest_preferences.h [2:2]
+ tls/s2n_tls13_key_schedule.c [2:2]
+ tls/s2n_tls13_key_schedule.h [2:2]
+ tls/s2n_tls13_secrets.c [2:2]
+ tls/s2n_tls13_secrets.h [2:2]
tls/s2n_tls_parameters.h [2:2]
tls/s2n_x509_validator.c [2:2]
tls/s2n_x509_validator.h [2:2]
@@ -324,6 +390,8 @@ BELONGS ya.make
utils/s2n_compiler.h [2:2]
utils/s2n_ensure.c [2:2]
utils/s2n_ensure.h [2:2]
+ utils/s2n_fork_detection.c [2:2]
+ utils/s2n_fork_detection.h [2:2]
utils/s2n_init.c [2:2]
utils/s2n_map.c [2:2]
utils/s2n_map.h [2:2]
@@ -338,15 +406,25 @@ BELONGS ya.make
utils/s2n_rfc5952.h [3:3]
utils/s2n_safety.c [2:2]
utils/s2n_safety.h [2:2]
+ utils/s2n_safety_macros.h [3:3]
utils/s2n_set.c [2:2]
utils/s2n_set.h [2:2]
utils/s2n_socket.c [2:2]
utils/s2n_socket.h [2:2]
- utils/s2n_str.c [2:2]
- utils/s2n_str.h [2:2]
utils/s2n_timer.c [2:2]
utils/s2n_timer.h [2:2]
+KEEP COPYRIGHT_SERVICE_LABEL 94df8e6c5401627dfdf66b8d42e0c2f7
+BELONGS ya.make
+ License text:
+ Copyright (c) 1998-2008, Brian Gladman, Worcester, UK. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h [3:3]
+
KEEP COPYRIGHT_SERVICE_LABEL c04990393dc236bdadd91d1664e966b7
BELONGS ya.make
License text:
diff --git a/contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report
index 42c940cc02..8ac59c754b 100644
--- a/contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report
+++ b/contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report
@@ -31,7 +31,7 @@
KEEP Apache-2.0 2b42edef8fa55315f34f2370b4715ca9
BELONGS ya.make
-FILE_INCLUDE NOTICE found in files: LICENSE at line 107, LICENSE at line 110, LICENSE at line 112, LICENSE at line 117, LICENSE at line 120, LICENSE at line 142, pq-crypto/bike_r1/LICENSE at line 107, pq-crypto/bike_r1/LICENSE at line 110, pq-crypto/bike_r1/LICENSE at line 112, pq-crypto/bike_r1/LICENSE at line 117, pq-crypto/bike_r1/LICENSE at line 120, pq-crypto/bike_r1/LICENSE at line 142, pq-crypto/bike_r2/LICENSE at line 107, pq-crypto/bike_r2/LICENSE at line 110, pq-crypto/bike_r2/LICENSE at line 112, pq-crypto/bike_r2/LICENSE at line 117, pq-crypto/bike_r2/LICENSE at line 120, pq-crypto/bike_r2/LICENSE at line 142
+FILE_INCLUDE NOTICE found in files: LICENSE at line 107, LICENSE at line 110, LICENSE at line 112, LICENSE at line 117, LICENSE at line 120, LICENSE at line 142, pq-crypto/bike_r1/LICENSE at line 107, pq-crypto/bike_r1/LICENSE at line 110, pq-crypto/bike_r1/LICENSE at line 112, pq-crypto/bike_r1/LICENSE at line 117, pq-crypto/bike_r1/LICENSE at line 120, pq-crypto/bike_r1/LICENSE at line 142, pq-crypto/bike_r2/LICENSE at line 107, pq-crypto/bike_r2/LICENSE at line 110, pq-crypto/bike_r2/LICENSE at line 112, pq-crypto/bike_r2/LICENSE at line 117, pq-crypto/bike_r2/LICENSE at line 120, pq-crypto/bike_r2/LICENSE at line 142, pq-crypto/bike_r3/LICENSE at line 107, pq-crypto/bike_r3/LICENSE at line 110, pq-crypto/bike_r3/LICENSE at line 112, pq-crypto/bike_r3/LICENSE at line 117, pq-crypto/bike_r3/LICENSE at line 120, pq-crypto/bike_r3/LICENSE at line 142
Note: matched license text is too long. Read it in the source files.
Scancode info:
Original SPDX id: Apache-2.0
@@ -42,6 +42,7 @@ FILE_INCLUDE NOTICE found in files: LICENSE at line 107, LICENSE at line 110, LI
LICENSE [2:202]
pq-crypto/bike_r1/LICENSE [2:202]
pq-crypto/bike_r2/LICENSE [2:202]
+ pq-crypto/bike_r3/LICENSE [2:202]
KEEP MIT 3158a75221c70090b7e127aa7c980dd8
BELONGS ya.make
@@ -101,7 +102,7 @@ BELONGS ya.make
Files with this license:
pq-crypto/kyber_90s_r2/sha2_c.c [6:6]
pq-crypto/kyber_r2/fips202_kyber_r2.c [1:1]
- pq-crypto/sike_r2/fips202.c [1:1]
+ pq-crypto/kyber_r3/kyber512r3_fips202.c [1:1]
KEEP Public-Domain 4a195ae04383fd8b4accb4b7dfa4a8fd
BELONGS ya.make
@@ -114,7 +115,7 @@ BELONGS ya.make
Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
Files with this license:
pq-crypto/kyber_r2/fips202_kyber_r2.c [4:4]
- pq-crypto/sike_r2/fips202.c [4:4]
+ pq-crypto/kyber_r3/kyber512r3_fips202.c [4:4]
KEEP Apache-2.0 4a2e07381780102963e0358ca3a73d9d
BELONGS ya.make
@@ -142,6 +143,8 @@ BELONGS ya.make
Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
Files with this license:
pq-crypto/sike_r1/fips202_r1.c [6:6]
+ pq-crypto/sike_r3/sikep434r3_fips202.c [6:6]
+ pq-crypto/sike_r3/sikep434r3_fips202.h [6:6]
KEEP Public-Domain 57ef9200aa29fa2802feeb707d6933a6
BELONGS ya.make
@@ -154,11 +157,13 @@ BELONGS ya.make
Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
Files with this license:
pq-crypto/sike_r1/fips202_r1.c [4:4]
+ pq-crypto/sike_r3/sikep434r3_fips202.c [4:4]
+ pq-crypto/sike_r3/sikep434r3_fips202.h [4:4]
KEEP Apache-2.0 5b1f5708e6e10c9740fe8b501f9b7c9e
BELONGS ya.make
License text:
- s2n is a C99 implementation of the TLS/SSL protocols that is designed to be simple, small, fast, and with security as a priority. It is released and licensed under the Apache License 2.0.
+ s2n-tls is a C99 implementation of the TLS/SSL protocols that is designed to be simple, small, fast, and with security as a priority. It is released and licensed under the Apache License 2.0.
Scancode info:
Original SPDX id: Apache-2.0
Score : 100.00
@@ -167,6 +172,21 @@ BELONGS ya.make
Files with this license:
README.md [3:3]
+KEEP Brian-Gladman-3-Clause 5c633a0224a7fbc49111f08904d03f31
+BELONGS ya.make
+FILE_INCLUDE LICENSE found in files: pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h at line 5
+FILE_INCLUDE pq-crypto/bike_r1/LICENSE found in files: pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h at line 5
+FILE_INCLUDE pq-crypto/bike_r2/LICENSE found in files: pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h at line 5
+FILE_INCLUDE pq-crypto/bike_r3/LICENSE found in files: pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h at line 5
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-brian-gladman-3-clause
+ Score : 100.00
+ Match type : NOTICE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/brian-gladman-3-clause.LICENSE
+ Files with this license:
+ pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h [5:23]
+
KEEP MIT 6fe4646e33a88690f9a284e7cf5c5522
BELONGS ya.make
License text:
@@ -275,6 +295,7 @@ BELONGS ya.make
crypto/s2n_cipher.c [4:13]
crypto/s2n_cipher.h [4:13]
crypto/s2n_composite_cipher_aes_sha.c [4:13]
+ crypto/s2n_crypto.c [4:13]
crypto/s2n_crypto.h [4:13]
crypto/s2n_dhe.c [4:13]
crypto/s2n_dhe.h [4:13]
@@ -286,6 +307,8 @@ BELONGS ya.make
crypto/s2n_ecdsa.h [4:13]
crypto/s2n_evp.c [4:13]
crypto/s2n_evp.h [4:13]
+ crypto/s2n_evp_signing.c [4:13]
+ crypto/s2n_evp_signing.h [4:13]
crypto/s2n_fips.c [4:13]
crypto/s2n_fips.h [4:13]
crypto/s2n_hash.c [4:13]
@@ -294,8 +317,10 @@ BELONGS ya.make
crypto/s2n_hkdf.h [4:13]
crypto/s2n_hmac.c [4:13]
crypto/s2n_hmac.h [4:13]
+ crypto/s2n_libcrypto.c [4:13]
crypto/s2n_openssl.h [4:13]
crypto/s2n_openssl_evp.h [4:13]
+ crypto/s2n_openssl_x509.c [4:13]
crypto/s2n_openssl_x509.h [4:13]
crypto/s2n_pkey.c [4:13]
crypto/s2n_pkey.h [4:13]
@@ -327,6 +352,8 @@ BELONGS ya.make
stuffer/s2n_stuffer_text.c [4:13]
tls/extensions/s2n_client_alpn.c [4:13]
tls/extensions/s2n_client_alpn.h [4:13]
+ tls/extensions/s2n_client_early_data_indication.c [4:13]
+ tls/extensions/s2n_client_ems.c [4:13]
tls/extensions/s2n_client_key_share.c [4:13]
tls/extensions/s2n_client_key_share.h [4:13]
tls/extensions/s2n_client_max_frag_len.c [4:13]
@@ -353,8 +380,10 @@ BELONGS ya.make
tls/extensions/s2n_client_supported_versions.h [4:13]
tls/extensions/s2n_cookie.c [4:13]
tls/extensions/s2n_cookie.h [4:13]
+ tls/extensions/s2n_early_data_indication.h [4:13]
tls/extensions/s2n_ec_point_format.c [4:13]
tls/extensions/s2n_ec_point_format.h [4:13]
+ tls/extensions/s2n_ems.h [4:13]
tls/extensions/s2n_extension_list.c [4:13]
tls/extensions/s2n_extension_list.h [4:13]
tls/extensions/s2n_extension_type.c [4:13]
@@ -363,6 +392,7 @@ BELONGS ya.make
tls/extensions/s2n_extension_type_lists.h [4:13]
tls/extensions/s2n_key_share.c [4:13]
tls/extensions/s2n_key_share.h [4:13]
+ tls/extensions/s2n_nst_early_data_indication.c [4:13]
tls/extensions/s2n_psk_key_exchange_modes.c [4:13]
tls/extensions/s2n_psk_key_exchange_modes.h [4:13]
tls/extensions/s2n_quic_transport_params.c [4:13]
@@ -371,6 +401,8 @@ BELONGS ya.make
tls/extensions/s2n_server_alpn.h [4:13]
tls/extensions/s2n_server_certificate_status.c [4:13]
tls/extensions/s2n_server_certificate_status.h [4:13]
+ tls/extensions/s2n_server_early_data_indication.c [4:13]
+ tls/extensions/s2n_server_ems.c [4:13]
tls/extensions/s2n_server_key_share.c [4:13]
tls/extensions/s2n_server_key_share.h [4:13]
tls/extensions/s2n_server_max_fragment_length.c [4:13]
@@ -411,6 +443,7 @@ BELONGS ya.make
tls/s2n_client_finished.c [4:13]
tls/s2n_client_hello.c [4:13]
tls/s2n_client_hello.h [4:13]
+ tls/s2n_client_hello_request.c [4:13]
tls/s2n_client_key_exchange.c [4:13]
tls/s2n_client_key_exchange.h [4:13]
tls/s2n_config.c [4:13]
@@ -420,14 +453,22 @@ BELONGS ya.make
tls/s2n_connection_evp_digests.h [4:13]
tls/s2n_crypto.h [4:13]
tls/s2n_crypto_constants.h [4:13]
+ tls/s2n_early_data.c [4:13]
+ tls/s2n_early_data.h [4:13]
+ tls/s2n_early_data_io.c [4:13]
tls/s2n_ecc_preferences.c [4:13]
tls/s2n_ecc_preferences.h [4:13]
tls/s2n_encrypted_extensions.c [4:13]
tls/s2n_establish_session.c [4:13]
tls/s2n_handshake.c [4:13]
tls/s2n_handshake.h [4:13]
+ tls/s2n_handshake_hashes.c [4:13]
+ tls/s2n_handshake_hashes.h [4:13]
tls/s2n_handshake_io.c [4:13]
tls/s2n_handshake_transcript.c [4:13]
+ tls/s2n_handshake_type.c [4:13]
+ tls/s2n_handshake_type.h [4:13]
+ tls/s2n_internal.h [4:13]
tls/s2n_kem.c [4:13]
tls/s2n_kem.h [4:13]
tls/s2n_kem_preferences.c [4:13]
@@ -435,6 +476,8 @@ BELONGS ya.make
tls/s2n_kex.c [4:13]
tls/s2n_kex.h [4:13]
tls/s2n_kex_data.h [4:13]
+ tls/s2n_key_log.c [4:13]
+ tls/s2n_key_log.h [4:13]
tls/s2n_key_update.c [4:13]
tls/s2n_key_update.h [4:13]
tls/s2n_ocsp_stapling.c [4:13]
@@ -443,6 +486,7 @@ BELONGS ya.make
tls/s2n_prf.c [4:13]
tls/s2n_prf.h [4:13]
tls/s2n_protocol_preferences.c [4:13]
+ tls/s2n_protocol_preferences.h [4:13]
tls/s2n_psk.c [4:13]
tls/s2n_psk.h [4:13]
tls/s2n_quic_support.c [4:13]
@@ -485,7 +529,10 @@ BELONGS ya.make
tls/s2n_tls13_certificate_verify.h [4:13]
tls/s2n_tls13_handshake.c [4:13]
tls/s2n_tls13_handshake.h [4:13]
- tls/s2n_tls_digest_preferences.h [4:13]
+ tls/s2n_tls13_key_schedule.c [4:13]
+ tls/s2n_tls13_key_schedule.h [4:13]
+ tls/s2n_tls13_secrets.c [4:13]
+ tls/s2n_tls13_secrets.h [4:13]
tls/s2n_tls_parameters.h [4:13]
tls/s2n_x509_validator.c [4:13]
tls/s2n_x509_validator.h [4:13]
@@ -500,6 +547,8 @@ BELONGS ya.make
utils/s2n_compiler.h [4:13]
utils/s2n_ensure.c [4:13]
utils/s2n_ensure.h [4:13]
+ utils/s2n_fork_detection.c [4:13]
+ utils/s2n_fork_detection.h [4:13]
utils/s2n_init.c [4:13]
utils/s2n_map.c [4:13]
utils/s2n_map.h [4:13]
@@ -513,12 +562,11 @@ BELONGS ya.make
utils/s2n_rfc5952.c [4:13]
utils/s2n_safety.c [4:13]
utils/s2n_safety.h [4:13]
+ utils/s2n_safety_macros.h [5:14]
utils/s2n_set.c [4:13]
utils/s2n_set.h [4:13]
utils/s2n_socket.c [4:13]
utils/s2n_socket.h [4:13]
- utils/s2n_str.c [4:13]
- utils/s2n_str.h [4:13]
utils/s2n_timer.c [4:13]
utils/s2n_timer.h [4:13]
@@ -594,11 +642,50 @@ BELONGS ya.make
pq-crypto/bike_r2/types.h [2:2]
pq-crypto/bike_r2/utilities.c [2:2]
pq-crypto/bike_r2/utilities.h [2:2]
+ pq-crypto/bike_r3/aes.h [2:2]
+ pq-crypto/bike_r3/aes_ctr_prf.c [2:2]
+ pq-crypto/bike_r3/aes_ctr_prf.h [2:2]
+ pq-crypto/bike_r3/bike_defs.h [2:2]
+ pq-crypto/bike_r3/bike_r3_kem.c [2:2]
+ pq-crypto/bike_r3/cleanup.h [2:2]
+ pq-crypto/bike_r3/decode.c [2:2]
+ pq-crypto/bike_r3/decode.h [2:2]
+ pq-crypto/bike_r3/decode_avx2.c [2:2]
+ pq-crypto/bike_r3/decode_avx512.c [2:2]
+ pq-crypto/bike_r3/decode_internal.h [2:2]
+ pq-crypto/bike_r3/decode_portable.c [2:2]
+ pq-crypto/bike_r3/defs.h [2:2]
+ pq-crypto/bike_r3/error.c [2:2]
+ pq-crypto/bike_r3/error.h [2:2]
+ pq-crypto/bike_r3/gf2x.h [2:2]
+ pq-crypto/bike_r3/gf2x_internal.h [2:2]
+ pq-crypto/bike_r3/gf2x_inv.c [2:2]
+ pq-crypto/bike_r3/gf2x_ksqr_avx2.c [2:2]
+ pq-crypto/bike_r3/gf2x_ksqr_avx512.c [2:2]
+ pq-crypto/bike_r3/gf2x_ksqr_portable.c [2:2]
+ pq-crypto/bike_r3/gf2x_mul.c [2:2]
+ pq-crypto/bike_r3/gf2x_mul_avx2.c [2:2]
+ pq-crypto/bike_r3/gf2x_mul_avx512.c [2:2]
+ pq-crypto/bike_r3/gf2x_mul_base_pclmul.c [2:2]
+ pq-crypto/bike_r3/gf2x_mul_base_portable.c [2:2]
+ pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c [2:2]
+ pq-crypto/bike_r3/gf2x_mul_portable.c [2:2]
+ pq-crypto/bike_r3/sampling.c [2:2]
+ pq-crypto/bike_r3/sampling.h [2:2]
+ pq-crypto/bike_r3/sampling_avx2.c [2:2]
+ pq-crypto/bike_r3/sampling_avx512.c [2:2]
+ pq-crypto/bike_r3/sampling_internal.h [2:2]
+ pq-crypto/bike_r3/sampling_portable.c [2:2]
+ pq-crypto/bike_r3/sha.h [2:2]
+ pq-crypto/bike_r3/types.h [2:2]
+ pq-crypto/bike_r3/utilities.c [2:2]
+ pq-crypto/bike_r3/utilities.h [2:2]
+ pq-crypto/bike_r3/x86_64_intrinsic.h [2:2]
KEEP Apache-2.0 f7d0ea4ca54b2001383f4d9f0cd6199f
BELONGS ya.make
License text:
- [![Apache 2 License](https://img.shields.io/github/license/awslabs/s2n.svg)](http://aws.amazon.com/apache-2-0/)
+ [![Apache 2 License](https://img.shields.io/github/license/aws/s2n-tls.svg)](http://aws.amazon.com/apache-2-0/)
Scancode info:
Original SPDX id: Apache-2.0
Score : 100.00
@@ -607,10 +694,24 @@ BELONGS ya.make
Files with this license:
README.md [6:6]
+KEEP CC0-1.0 f9ca2565454cb7a5765a25d0b8486c11
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: CC0-1.0
+ Score : 90.00
+ Match type : NOTICE
+ Links : http://creativecommons.org/publicdomain/zero/1.0/, http://creativecommons.org/publicdomain/zero/1.0/legalcode, https://spdx.org/licenses/CC0-1.0
+ Files with this license:
+ pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c [3:13]
+ pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c [846:856]
+ pq-crypto/kyber_r3/KeccakP-1600-times4-SnP_avx2.h [3:13]
+ pq-crypto/kyber_r3/KeccakP-align_avx2.h [3:13]
+
KEEP Apache-2.0 fff0309371ea9c4919c24ec64989dd0f
BELONGS ya.make
License text:
- [![Apache 2 License](https://img.shields.io/github/license/awslabs/s2n.svg)](http://aws.amazon.com/apache-2-0/)
+ [![Apache 2 License](https://img.shields.io/github/license/aws/s2n-tls.svg)](http://aws.amazon.com/apache-2-0/)
Scancode info:
Original SPDX id: Apache-2.0
Score : 100.00
diff --git a/contrib/restricted/aws/s2n/.yandex_meta/licenses.list.txt b/contrib/restricted/aws/s2n/.yandex_meta/licenses.list.txt
index cd2051371c..c7ccce42eb 100644
--- a/contrib/restricted/aws/s2n/.yandex_meta/licenses.list.txt
+++ b/contrib/restricted/aws/s2n/.yandex_meta/licenses.list.txt
@@ -258,11 +258,47 @@
====================Apache-2.0====================
-[![Apache 2 License](https://img.shields.io/github/license/awslabs/s2n.svg)](http://aws.amazon.com/apache-2-0/)
+[![Apache 2 License](https://img.shields.io/github/license/aws/s2n-tls.svg)](http://aws.amazon.com/apache-2-0/)
====================Apache-2.0====================
-s2n is a C99 implementation of the TLS/SSL protocols that is designed to be simple, small, fast, and with security as a priority. It is released and licensed under the Apache License 2.0.
+s2n-tls is a C99 implementation of the TLS/SSL protocols that is designed to be simple, small, fast, and with security as a priority. It is released and licensed under the Apache License 2.0.
+
+
+====================Brian-Gladman-3-Clause====================
+ LICENSE TERMS
+
+ The redistribution and use of this software (with or without changes)
+ is allowed without the payment of fees or royalties provided that:
+
+ 1. source code distributions include the above copyright notice, this
+ list of conditions and the following disclaimer;
+
+ 2. binary distributions include the above copyright notice, this list
+ of conditions and the following disclaimer in their documentation;
+
+ 3. the name of the copyright holder is not used to endorse products
+ built using this software without specific written permission.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+
+
+====================CC0-1.0====================
+Joan Daemen, Michaël Peeters, Gilles Van Assche and Ronny Van Keer, hereby
+denoted as "the implementer".
+
+For more information, feedback or questions, please refer to our websites:
+http://keccak.noekeon.org/
+http://keyak.noekeon.org/
+http://ketje.noekeon.org/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
====================COPYRIGHT====================
@@ -274,6 +310,10 @@ s2n is a C99 implementation of the TLS/SSL protocols that is designed to be simp
====================COPYRIGHT====================
+ Copyright (c) 1998-2008, Brian Gladman, Worcester, UK. All rights reserved.
+
+
+====================COPYRIGHT====================
Copyright (c) 2016-2017 Microsoft Corporation
Copyright (c) 2017 InfoSec Global, Reza Azarderakhsh, Matthew
Campagna, Luca De Feo, Amir Jalali, David Jao,
@@ -281,11 +321,850 @@ Copyright (c) 2017 InfoSec Global, Reza Azarderakhsh, Matthew
All rights reserved.
+====================File: LICENSE====================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+============================================================================
+ S2N SUBCOMPONENTS:
+
+ The s2n Project contains subcomponents with separate copyright notices
+ and license terms. Your use of the source code for these subcomponents is
+ subject to the terms and conditions of the following licenses.
+
+
+========================================================================
+Third party MIT licenses
+========================================================================
+
+The following components are provided under the MIT License. See project link for details.
+
+
+ SIKE
+ -> s2n/pq-crypto/sike_r1/LICENSE.txt
+
+
+
+
+
====================File: NOTICE====================
s2n
Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+====================File: pq-crypto/bike_r1/LICENSE====================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+====================File: pq-crypto/bike_r2/LICENSE====================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+====================File: pq-crypto/bike_r3/LICENSE====================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
====================MIT====================
* SPDX-License-Identifier: MIT
diff --git a/contrib/restricted/aws/s2n/CMakeLists.darwin.txt b/contrib/restricted/aws/s2n/CMakeLists.darwin.txt
index 98e44c90b4..bf42b0c84a 100644
--- a/contrib/restricted/aws/s2n/CMakeLists.darwin.txt
+++ b/contrib/restricted/aws/s2n/CMakeLists.darwin.txt
@@ -11,6 +11,16 @@ find_package(OpenSSL REQUIRED)
add_library(restricted-aws-s2n)
target_compile_options(restricted-aws-s2n PRIVATE
-DS2N_ADX
+ -DS2N_BIKE_R3_AVX2
+ -DS2N_BIKE_R3_AVX512
+ -DS2N_BIKE_R3_PCLMUL
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N___RESTRICT__SUPPORTED
-DS2N_HAVE_EXECINFO
-DS2N_CPUID_AVAILABLE
)
@@ -26,6 +36,24 @@ target_link_libraries(restricted-aws-s2n PUBLIC
OpenSSL::OpenSSL
)
target_sources(restricted-aws-s2n PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_pclmul.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx512.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx512.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx512.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx512.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_aes_gcm.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_chacha20_poly1305.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_3des.c
@@ -33,15 +61,19 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_certificate.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_cipher.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_composite_cipher_aes_sha.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_crypto.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_dhe.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_drbg.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_ecc_evp.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_evp.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_evp_signing.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_fips.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_hash.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_hkdf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_hmac.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_libcrypto.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_pkey.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_rsa.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.c
@@ -75,6 +107,19 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling_portable.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r2/secure_decode_portable.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_r3_kem.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_inv.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/aes256ctr.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/aes_c.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/cbd.c
@@ -96,14 +141,33 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/reduce.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/symmetric-fips202.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/verify.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_basemul_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fq_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_invntt_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_kem.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_shuffle_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric-shake.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/s2n_pq_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r1/P503_r1.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r1/fips202_r1.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r1/fp_generic_r1.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sike_r1_kem.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_kem.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_sidh.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_base64.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_file.c
@@ -111,6 +175,8 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_pem.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_text.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_alpn.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_early_data_indication.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_ems.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_key_share.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_max_frag_len.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_pq_kem.c
@@ -129,10 +195,13 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type_lists.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_nst_early_data_indication.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_quic_transport_params.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_alpn.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_certificate_status.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_early_data_indication.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_ems.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_key_share.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_max_fragment_length.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_psk.c
@@ -156,19 +225,25 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_cert_verify.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_finished.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_hello.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_hello_request.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_key_exchange.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_config.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_connection.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_early_data.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_early_data_io.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_encrypted_extensions.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_establish_session.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake_io.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake_transcript.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake_type.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_kem.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_kex.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_key_log.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_key_update.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_ocsp_stapling.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_post_handshake.c
@@ -202,11 +277,14 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13_certificate_verify.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_x509_validator.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_array.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_asn1_time.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_blob.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_ensure.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_fork_detection.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_init.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_map.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_mem.c
@@ -216,6 +294,206 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_safety.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_set.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_socket.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_str.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_timer.c
)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_pclmul.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mpclmul
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx512.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx512.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx512.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx512.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+ -mvpclmulqdq
+)
diff --git a/contrib/restricted/aws/s2n/CMakeLists.linux.txt b/contrib/restricted/aws/s2n/CMakeLists.linux.txt
index d7ae8d807b..e66c27d911 100644
--- a/contrib/restricted/aws/s2n/CMakeLists.linux.txt
+++ b/contrib/restricted/aws/s2n/CMakeLists.linux.txt
@@ -11,8 +11,19 @@ find_package(OpenSSL REQUIRED)
add_library(restricted-aws-s2n)
target_compile_options(restricted-aws-s2n PRIVATE
-DS2N_ADX
+ -DS2N_BIKE_R3_AVX2
+ -DS2N_BIKE_R3_AVX512
+ -DS2N_BIKE_R3_PCLMUL
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N___RESTRICT__SUPPORTED
+ -DS2N_SIKE_P434_R3_ASM
+ -DS2N_FEATURES_AVAILABLE
-DS2N_HAVE_EXECINFO
- -DS2N_SIKEP434R2_ASM
-DS2N_CPUID_AVAILABLE
)
target_include_directories(restricted-aws-s2n PUBLIC
@@ -27,7 +38,25 @@ target_link_libraries(restricted-aws-s2n PUBLIC
OpenSSL::OpenSSL
)
target_sources(restricted-aws-s2n PRIVATE
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sikep434r2_fp_x64_asm.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_pclmul.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx512.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx512.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx512.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx512.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.S
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_aes_gcm.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_chacha20_poly1305.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_3des.c
@@ -35,15 +64,19 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_certificate.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_cipher.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_composite_cipher_aes_sha.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_crypto.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_dhe.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_drbg.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_ecc_evp.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_evp.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_evp_signing.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_fips.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_hash.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_hkdf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_hmac.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_libcrypto.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_pkey.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_rsa.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.c
@@ -77,6 +110,19 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling_portable.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r2/secure_decode_portable.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_r3_kem.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_inv.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_portable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/aes256ctr.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/aes_c.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/cbd.c
@@ -98,14 +144,33 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/reduce.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/symmetric-fips202.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/verify.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_basemul_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fq_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_invntt_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_kem.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_shuffle_avx2.S
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric-shake.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/s2n_pq_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r1/P503_r1.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r1/fips202_r1.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r1/fp_generic_r1.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sike_r1_kem.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_kem.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_sidh.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_base64.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_file.c
@@ -113,6 +178,8 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_pem.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_text.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_alpn.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_early_data_indication.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_ems.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_key_share.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_max_frag_len.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_client_pq_kem.c
@@ -131,10 +198,13 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type_lists.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_nst_early_data_indication.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_quic_transport_params.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_alpn.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_certificate_status.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_early_data_indication.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_ems.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_key_share.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_max_fragment_length.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/extensions/s2n_server_psk.c
@@ -158,19 +228,25 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_cert_verify.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_finished.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_hello.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_hello_request.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_client_key_exchange.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_config.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_connection.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_early_data.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_early_data_io.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_encrypted_extensions.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_establish_session.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake_io.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake_transcript.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_handshake_type.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_kem.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_kex.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_key_log.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_key_update.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_ocsp_stapling.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_post_handshake.c
@@ -204,11 +280,14 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13_certificate_verify.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/tls/s2n_x509_validator.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_array.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_asn1_time.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_blob.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_ensure.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_fork_detection.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_init.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_map.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_mem.c
@@ -218,6 +297,206 @@ target_sources(restricted-aws-s2n PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_safety.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_set.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_socket.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_str.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/utils/s2n_timer.c
)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_pclmul.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mpclmul
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx2
+ -mfma
+ -mbmi
+ -mbmi2
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx512.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx512.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx512.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx512.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+)
+set_property(
+ SOURCE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c
+ APPEND
+ PROPERTY
+ COMPILE_OPTIONS
+ -mavx512f
+ -mavx512cd
+ -mavx512bw
+ -mavx512dq
+ -mavx512vl
+ -mvpclmulqdq
+)
diff --git a/contrib/restricted/aws/s2n/README.md b/contrib/restricted/aws/s2n/README.md
index cdca230e20..6456d973d5 100644
--- a/contrib/restricted/aws/s2n/README.md
+++ b/contrib/restricted/aws/s2n/README.md
@@ -1,22 +1,22 @@
<img src="docs/images/s2n_logo_github.png" alt="s2n">
-s2n is a C99 implementation of the TLS/SSL protocols that is designed to be simple, small, fast, and with security as a priority. It is released and licensed under the Apache License 2.0.
+s2n-tls is a C99 implementation of the TLS/SSL protocols that is designed to be simple, small, fast, and with security as a priority. It is released and licensed under the Apache License 2.0.
-[![Build Status](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiMndlTzJNbHVxWEo3Nm82alp4eGdGNm4rTWdxZDVYU2VTbitIR0ZLbHVtcFFGOW5majk5QnhqaUp3ZEkydG1ueWg0NGlhRE43a1ZnUzZaQTVnSm91TzFFPSIsIml2UGFyYW1ldGVyU3BlYyI6IlJLbW42NENlYXhJNy80QnYiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=main)](https://github.com/awslabs/s2n/)
-[![Apache 2 License](https://img.shields.io/github/license/awslabs/s2n.svg)](http://aws.amazon.com/apache-2-0/)
+[![Build Status](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiMndlTzJNbHVxWEo3Nm82alp4eGdGNm4rTWdxZDVYU2VTbitIR0ZLbHVtcFFGOW5majk5QnhqaUp3ZEkydG1ueWg0NGlhRE43a1ZnUzZaQTVnSm91TzFFPSIsIml2UGFyYW1ldGVyU3BlYyI6IlJLbW42NENlYXhJNy80QnYiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=main)](https://github.com/aws/s2n-tls/)
+[![Apache 2 License](https://img.shields.io/github/license/aws/s2n-tls.svg)](http://aws.amazon.com/apache-2-0/)
[![C99](https://img.shields.io/badge/language-C99-blue.svg)](http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1256.pdf)
-[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/awslabs/s2n.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/awslabs/s2n/context:cpp)
-[![codecov](https://codecov.io/gh/awslabs/s2n/branch/main/graph/badge.svg)](https://codecov.io/gh/awslabs/s2n)
-[![Github forks](https://img.shields.io/github/forks/awslabs/s2n.svg)](https://github.com/awslabs/s2n/network)
-[![Github stars](https://img.shields.io/github/stars/awslabs/s2n.svg)](https://github.com/awslabs/s2n/stargazers)
+[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/aws/s2n-tls.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/aws/s2n-tls/context:cpp)
+[![codecov](https://codecov.io/gh/aws/s2n-tls/branch/main/graph/badge.svg)](https://codecov.io/gh/aws/s2n-tls)
+[![Github forks](https://img.shields.io/github/forks/aws/s2n-tls.svg)](https://github.com/aws/s2n-tls/network)
+[![Github stars](https://img.shields.io/github/stars/aws/s2n-tls.svg)](https://github.com/aws/s2n-tls/stargazers)
[![Join the chat at https://gitter.im/awslabs/s2n](https://badges.gitter.im/awslabs/s2n.svg)](https://gitter.im/awslabs/s2n?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Quickstart for Ubuntu
-1. Fork s2n on GitHub
+1. Fork s2n-tls on GitHub
2. Run the following commands on Ubuntu.
```
-git clone https://github.com/${YOUR_GITHUB_ACCOUNT_NAME}/s2n.git
-cd s2n
+git clone https://github.com/${YOUR_GITHUB_ACCOUNT_NAME}/s2n-tls.git
+cd s2n-tls
# Pick an "env" line from the codebuild/codebuild.config file and run it, in this case choose the openssl-1.1.1 with GCC 9 build
S2N_LIBCRYPTO=openssl-1.1.1 BUILD_S2N=true TESTS=integration GCC_VERSION=9
@@ -35,28 +35,42 @@ If you are building on OSX, or simply don't want to execute the entire build scr
An example of building on OSX:
```sh
-brew install ninja cmake
-git clone https://github.com/${YOUR_GITHUB_ACCOUNT_NAME}/s2n.git
-mkdir s2n_build
-cd s2n_build
+# Install required dependencies using homebrew
+brew install ninja cmake coreutils openssl@1.1
-# Build with debug symbols and a specific OpenSSL version
-cmake -GNinja \
+# Clone the s2n-tls source repository into the `s2n-tls` directory
+git clone https://github.com/${YOUR_GITHUB_ACCOUNT_NAME}/s2n-tls.git
+cd s2n-tls
+
+# Create a build directory, and build s2n-tls with debug symbols and a specific OpenSSL version.
+cmake . -Bbuild -GNinja \
-DCMAKE_BUILD_TYPE=Debug \
- -DCMAKE_PREFIX_PATH=/usr/local/Cellar/openssl@1.1/1.1.1g \
- ../s2n
-ninja -j6
-CTEST_PARALLEL_LEVEL=5 ninja test
+ -DCMAKE_PREFIX_PATH=$(dirname $(dirname $(brew list openssl@1.1|grep libcrypto.dylib)))
+cmake --build ./build -j $(nproc)
+CTEST_PARALLEL_LEVEL=$(nproc) ninja -C build test
```
+### Amazon Linux 2
+
+Install dependencies with `./codebuild/bin/install_al2_dependencies.sh` after cloning.
+
+```sh
+git clone https://github.com/${YOUR_GITHUB_ACCOUNT_NAME}/s2n-tls.git
+cd s2n-tls
+cmake . -Bbuild -DCMAKE_EXE_LINKER_FLAGS="-lcrypto -lz" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
+cmake --build ./build -j $(nproc)
+CTEST_PARALLEL_LEVEL=$(nproc) make -C build test
+```
+
+
## Have a Question?
-If you have any questions about Submitting PR's, Opening Issues, s2n API usage, or something similar, we have a public chatroom available here to answer your questions: https://gitter.im/awslabs/s2n
+If you have any questions about Submitting PR's, Opening Issues, s2n-tls API usage, or something similar, we have a public chatroom available here to answer your questions: https://gitter.im/awslabs/s2n
Otherwise, if you think you might have found a security impacting issue, please instead follow [our Security Notification Process.](#security-issue-notifications)
-## Using s2n
+## Using s2n-tls
-The s2n I/O APIs are designed to be intuitive to developers familiar with the widely-used POSIX I/O APIs, and s2n supports blocking, non-blocking, and full-duplex I/O. Additionally there are no locks or mutexes within s2n.
+The s2n-tls I/O APIs are designed to be intuitive to developers familiar with the widely-used POSIX I/O APIs, and s2n-tls supports blocking, non-blocking, and full-duplex I/O. Additionally there are no locks or mutexes within s2n-tls.
```c
/* Create a server mode connection handle */
@@ -81,71 +95,71 @@ int bytes_written;
bytes_written = s2n_send(conn, "Hello World", sizeof("Hello World"), &blocked);
```
-For details on building the s2n library and how to use s2n in an application you are developing, see the [API Reference](https://github.com/awslabs/s2n/blob/main/docs/USAGE-GUIDE.md).
+For details on building the s2n-tls library and how to use s2n-tls in an application you are developing, see the [API Reference](https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md).
-## s2n features
+## s2n-tls features
-s2n implements SSLv3, TLS1.0, TLS1.1, and TLS1.2. For encryption, s2n supports 128-bit and 256-bit AES, in the CBC and GCM modes, ChaCha20, 3DES, and RC4. For forward secrecy, s2n supports both DHE and ECDHE. s2n also supports the Server Name Indicator (SNI), Application-Layer Protocol Negotiation (ALPN) and the Online Certificate Status Protocol (OCSP) TLS extensions. SSLv3, RC4, 3DES and DHE are each disabled by default for security reasons.
+s2n-tls implements SSLv3, TLS1.0, TLS1.1, TLS1.2, and TLS1.3. For encryption, s2n-tls supports 128-bit and 256-bit AES in the CBC and GCM modes, ChaCha20, 3DES, and RC4. For forward secrecy, s2n-tls supports both DHE and ECDHE. s2n-tls also supports the Server Name Indicator (SNI), Application-Layer Protocol Negotiation (ALPN), and Online Certificate Status Protocol (OCSP) TLS extensions. SSLv3, RC4, 3DES, and DHE are each disabled by default for security reasons.
-As it can be difficult to keep track of which encryption algorithms and protocols are best to use, s2n features a simple API to use the latest "default" set of preferences. If you prefer to remain on a specific version for backwards compatibility, that is also supported.
+As it can be difficult to keep track of which encryption algorithms and protocols are best to use, s2n-tls features a simple API to use the latest "default" set of preferences. If you prefer to remain on a specific version for backwards compatibility, that is also supported.
```c
-/* Use the latest s2n "default" set of ciphersuite and protocol preferences */
+/* Use the latest s2n-tls "default" set of ciphersuite and protocol preferences */
s2n_config_set_cipher_preferences(config, "default");
/* Use a specific set of preferences, update when you're ready */
s2n_config_set_cipher_preferences(config, "20150306")
```
-## s2n safety mechanisms
+## s2n-tls safety mechanisms
-Internally s2n takes a systematic approach to data protection and includes several mechanisms designed to improve safety.
+Internally s2n-tls takes a systematic approach to data protection and includes several mechanisms designed to improve safety.
##### Small and auditable code base
-Ignoring tests, blank lines and comments, s2n is about 6,000 lines of code. s2n's code is also structured and written with a focus on reviewability. All s2n code is subject to code review, and we plan to complete security evaluations of s2n on an annual basis.
+Ignoring tests, blank lines and comments, s2n-tls is about 6,000 lines of code. s2n-tls's code is also structured and written with a focus on reviewability. All s2n-tls code is subject to code review, and we plan to complete security evaluations of s2n-tls on an annual basis.
-To date there have been two external code-level reviews of s2n, including one by a commercial security vendor. s2n has also been shared with some trusted members of the broader cryptography, security, and Open Source communities. Any issues discovered are always recorded in the s2n issue tracker.
+To date there have been two external code-level reviews of s2n-tls, including one by a commercial security vendor. s2n-tls has also been shared with some trusted members of the broader cryptography, security, and Open Source communities. Any issues discovered are always recorded in the s2n-tls issue tracker.
##### Static analysis, fuzz-testing and penetration testing
-In addition to code reviews, s2n is subject to regular static analysis, fuzz-testing, and penetration testing. Several penetration tests have occurred, including two by commercial vendors.
+In addition to code reviews, s2n-tls is subject to regular static analysis, fuzz-testing, and penetration testing. Several penetration tests have occurred, including two by commercial vendors.
##### Unit tests and end-to-end testing
-s2n includes positive and negative unit tests and end-to-end test cases.
+s2n-tls includes positive and negative unit tests and end-to-end test cases.
##### Erase on read
-s2n encrypts or erases plaintext data as quickly as possible. For example, decrypted data buffers are erased as they are read by the application.
+s2n-tls encrypts or erases plaintext data as quickly as possible. For example, decrypted data buffers are erased as they are read by the application.
##### Built-in memory protection
-s2n uses operating system features to protect data from being swapped to disk or appearing in core dumps.
+s2n-tls uses operating system features to protect data from being swapped to disk or appearing in core dumps.
##### Minimalist feature adoption
-s2n avoids implementing rarely used options and extensions, as well as features with a history of triggering protocol-level vulnerabilities. For example there is no support for session renegotiation or DTLS.
+s2n-tls avoids implementing rarely used options and extensions, as well as features with a history of triggering protocol-level vulnerabilities. For example there is no support for session renegotiation or DTLS.
##### Compartmentalized random number generation
-The security of TLS and its associated encryption algorithms depends upon secure random number generation. s2n provides every thread with two separate random number generators. One for "public" randomly generated data that may appear in the clear, and one for "private" data that should remain secret. This approach lessens the risk of potential predictability weaknesses in random number generation algorithms from leaking information across contexts.
+The security of TLS and its associated encryption algorithms depends upon secure random number generation. s2n-tls provides every thread with two separate random number generators. One for "public" randomly generated data that may appear in the clear, and one for "private" data that should remain secret. This approach lessens the risk of potential predictability weaknesses in random number generation algorithms from leaking information across contexts.
##### Modularized encryption
-s2n has been structured so that different encryption libraries may be used. Today s2n supports OpenSSL, LibreSSL, BoringSSL, and the Apple Common Crypto framework to perform the underlying cryptographic operations.
+s2n-tls has been structured so that different encryption libraries may be used. Today s2n-tls supports OpenSSL, LibreSSL, BoringSSL, and the Apple Common Crypto framework to perform the underlying cryptographic operations.
##### Timing blinding
-s2n includes structured support for blinding time-based side-channels that may leak sensitive data. For example, if s2n fails to parse a TLS record or handshake message, s2n will add a randomized delay of between 10 and 30 seconds, granular to nanoseconds, before responding. This raises the complexity of real-world timing side-channel attacks by a factor of at least tens of trillions.
+s2n-tls includes structured support for blinding time-based side-channels that may leak sensitive data. For example, if s2n-tls fails to parse a TLS record or handshake message, s2n-tls will add a randomized delay of between 10 and 30 seconds, granular to nanoseconds, before responding. This raises the complexity of real-world timing side-channel attacks by a factor of at least tens of trillions.
##### Table based state-machines
-s2n uses simple tables to drive the TLS/SSL state machines, making it difficult for invalid out-of-order states to arise.
+s2n-tls uses simple tables to drive the TLS/SSL state machines, making it difficult for invalid out-of-order states to arise.
##### C safety
-s2n is written in C, but makes light use of standard C library functions and wraps all memory handling, string handling, and serialization in systematic boundary-enforcing checks.
+s2n-tls is written in C, but makes light use of standard C library functions and wraps all memory handling, string handling, and serialization in systematic boundary-enforcing checks.
## Security issue notifications
-If you discover a potential security issue in s2n we ask that you notify
+If you discover a potential security issue in s2n-tls we ask that you notify
AWS Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
-If you package or distribute s2n, or use s2n as part of a large multi-user service, you may be eligible for pre-notification of future s2n releases. Please contact s2n-pre-notification@amazon.com.
+If you package or distribute s2n-tls, or use s2n-tls as part of a large multi-user service, you may be eligible for pre-notification of future s2n-tls releases. Please contact s2n-pre-notification@amazon.com.
-## Contributing to s2n
-If you are interested in contributing to s2n, please see our [development guide](https://github.com/awslabs/s2n/blob/main/docs/DEVELOPMENT-GUIDE.md).
+## Contributing to s2n-tls
+If you are interested in contributing to s2n-tls, please see our [development guide](https://github.com/aws/s2n-tls/blob/main/docs/DEVELOPMENT-GUIDE.md).
-## Language Bindings for s2n
-See our [language bindings list](https://github.com/awslabs/s2n/blob/main/docs/BINDINGS.md) for language bindings for s2n that we're aware of.
+## Language Bindings for s2n-tls
+See our [language bindings list](https://github.com/aws/s2n-tls/blob/main/docs/BINDINGS.md) for language bindings for s2n-tls that we're aware of.
diff --git a/contrib/restricted/aws/s2n/VERSIONING.rst b/contrib/restricted/aws/s2n/VERSIONING.rst
new file mode 100644
index 0000000000..7c347309c6
--- /dev/null
+++ b/contrib/restricted/aws/s2n/VERSIONING.rst
@@ -0,0 +1,40 @@
+*****************
+Versioning Policy
+*****************
+
+We use a three-part X.Y.Z (Major.Minor.Patch) versioning definition, as follows:
+
+* **X (Major)** version changes are significant and expected to break backwards compatibility.
+* **Y (Minor)** version changes are moderate changes. These include:
+
+ * Significant non-breaking feature additions.
+ * Possible backwards-incompatible changes. These changes will be noted and explained in detail in the release notes.
+
+* **Z (Patch)** version changes are small changes. These changes will not break backwards compatibility.
+
+ * Z releases will also include warning of upcoming breaking changes, whenever possible.
+
+Beta releases
+=============
+
+Versions with a zero major version (0.Y.Z) are considered to be beta
+releases. In beta releases, a Y-change may involve significant API changes.
+
+Branch stability
+================
+
+Untagged branches (such as main) are not subject to any API or ABI
+stability policy; APIs may change at any time.
+
+What this means for you
+=======================
+
+We recommend running the most recent version. Here are our suggestions for managing updates:
+
+* Beta releases should be considered to be under flux. While we will try to minimize churn, expect that
+ you'll need to make some changes to move to the 1.0.0 release.
+* X changes will require some effort to incorporate.
+* Y changes will not require significant effort to incorporate.
+ * If you have good unit and integration tests, these changes are generally safe to pick up automatically.
+* Z changes will not require any changes to your code. Z changes are intended to be picked up automatically.
+ * Good unit and integration tests are always recommended.
diff --git a/contrib/restricted/aws/s2n/api/s2n.h b/contrib/restricted/aws/s2n/api/s2n.h
index 0dfa21bf26..ee89829512 100644
--- a/contrib/restricted/aws/s2n/api/s2n.h
+++ b/contrib/restricted/aws/s2n/api/s2n.h
@@ -58,7 +58,7 @@ extern __thread int s2n_errno;
* in runtimes where thread-local variables may not be easily accessible.
*/
S2N_API
-extern int *s2n_errno_location();
+extern int *s2n_errno_location(void);
typedef enum {
S2N_ERR_T_OK=0,
@@ -77,6 +77,21 @@ extern int s2n_error_get_type(int error);
struct s2n_config;
struct s2n_connection;
+/**
+ * Prevents S2N from calling `OPENSSL_crypto_init`/`OPENSSL_cleanup`/`EVP_cleanup` on OpenSSL versions
+ * prior to 1.1.x. This allows applications or languages that also init OpenSSL to interoperate
+ * with S2N.
+ */
+S2N_API
+extern int s2n_crypto_disable_init(void);
+
+/**
+ * Prevents S2N from installing an atexit handler, which allows safe shutdown of S2N from within a
+ * re-entrant shared library
+ */
+S2N_API
+extern int s2n_disable_atexit(void);
+
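As a minimal, non-upstream sketch: an application that already initializes libcrypto itself could call both new toggles before bringing up the library. The ordering relative to `s2n_init()` is an assumption based on the comments above, not something stated in this diff.

```c
#include <s2n.h>

/* Opt out of s2n-tls touching OpenSSL global init/cleanup and of its atexit
 * handler, then initialize the library as usual. */
static int init_tls_library(void)
{
    if (s2n_crypto_disable_init() < 0) { return -1; } /* the application owns libcrypto init */
    if (s2n_disable_atexit() < 0) { return -1; }      /* shutdown will be driven explicitly */
    return s2n_init();
}
```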
S2N_API
extern unsigned long s2n_get_openssl_version(void);
S2N_API
@@ -167,6 +182,7 @@ typedef enum {
S2N_TLS_MAX_FRAG_LEN_4096 = 4,
} s2n_max_frag_len;
+struct s2n_cert;
struct s2n_cert_chain_and_key;
struct s2n_pkey;
typedef struct s2n_pkey s2n_cert_public_key;
@@ -177,6 +193,10 @@ extern struct s2n_cert_chain_and_key *s2n_cert_chain_and_key_new(void);
S2N_API
extern int s2n_cert_chain_and_key_load_pem(struct s2n_cert_chain_and_key *chain_and_key, const char *chain_pem, const char *private_key_pem);
S2N_API
+extern int s2n_cert_chain_and_key_load_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len, uint8_t *private_key_pem, uint32_t private_key_pem_len);
+S2N_API
+extern int s2n_cert_chain_and_key_load_public_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len);
+S2N_API
extern int s2n_cert_chain_and_key_free(struct s2n_cert_chain_and_key *cert_and_key);
S2N_API
extern int s2n_cert_chain_and_key_set_ctx(struct s2n_cert_chain_and_key *cert_and_key, void *ctx);
@@ -203,6 +223,21 @@ extern int s2n_config_set_verification_ca_location(struct s2n_config *config, co
S2N_API
extern int s2n_config_add_pem_to_trust_store(struct s2n_config *config, const char *pem);
+/**
+ * Clear the trust store.
+ *
+ * Note that the trust store will be initialized with the common locations for
+ * the host operating system by default. To completely override those locations,
+ * call this before functions like `s2n_config_set_verification_ca_location()`
+ * or `s2n_config_add_pem_to_trust_store()`
+ *
+ * @param config The configuration object being updated
+ *
+ * @return 0 on success and -1 on error
+ */
+S2N_API
+extern int s2n_config_wipe_trust_store(struct s2n_config *config);
+
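A short illustrative sketch (not part of this change) of combining the new `s2n_config_wipe_trust_store()` with the existing CA-location call, per the note above; the bundle path is a placeholder.

```c
#include <stddef.h>
#include <s2n.h>

/* Build a config that trusts only an application-provided CA bundle. */
static struct s2n_config *make_pinned_config(void)
{
    struct s2n_config *config = s2n_config_new();
    if (config == NULL) { return NULL; }

    /* Drop the default system locations first, then add our own bundle. */
    if (s2n_config_wipe_trust_store(config) < 0
        || s2n_config_set_verification_ca_location(config, "/etc/myapp/ca.pem", NULL) < 0) {
        s2n_config_free(config);
        return NULL;
    }
    return config;
}
```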
typedef uint8_t (*s2n_verify_host_fn) (const char *host_name, size_t host_name_len, void *data);
/* will be inherited by s2n_connection. If s2n_connection specifies a callback, that callback will be used for that connection. */
S2N_API
@@ -219,6 +254,19 @@ S2N_API
extern int s2n_config_add_dhparams(struct s2n_config *config, const char *dhparams_pem);
S2N_API
extern int s2n_config_set_cipher_preferences(struct s2n_config *config, const char *version);
+
+/**
+ * Appends the provided application protocol to the preference list
+ *
+ * The data provided in `protocol` parameter will be copied into an internal buffer
+ *
+ * @param config The configuration object being updated
+ * @param protocol A pointer to a byte array value
+ * @param protocol_len The length of bytes that should be read from `protocol`. Note: this value cannot be 0, otherwise an error will be returned.
+ */
+S2N_API
+extern int s2n_config_append_protocol_preference(struct s2n_config *config, const uint8_t *protocol, uint8_t protocol_len);
+
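For illustration only (not from the diff): building an ALPN preference list one entry at a time with the new append call, as an alternative to passing the whole list to `s2n_config_set_protocol_preferences()`.

```c
#include <s2n.h>

/* Prefer HTTP/2, then fall back to HTTP/1.1. */
static int add_alpn_preferences(struct s2n_config *config)
{
    const uint8_t h2[] = "h2";
    const uint8_t http11[] = "http/1.1";

    if (s2n_config_append_protocol_preference(config, h2, sizeof(h2) - 1) < 0) { return -1; }
    return s2n_config_append_protocol_preference(config, http11, sizeof(http11) - 1);
}
```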
S2N_API
extern int s2n_config_set_protocol_preferences(struct s2n_config *config, const char * const *protocols, int protocol_count);
typedef enum { S2N_STATUS_REQUEST_NONE = 0, S2N_STATUS_REQUEST_OCSP = 1 } s2n_status_request_type;
@@ -254,6 +302,11 @@ extern int s2n_config_add_ticket_crypto_key(struct s2n_config *config,
uint8_t *key, uint32_t key_len,
uint64_t intro_time_in_seconds_from_epoch);
+S2N_API
+extern int s2n_config_set_ctx(struct s2n_config *config, void *ctx);
+S2N_API
+extern int s2n_config_get_ctx(struct s2n_config *config, void **ctx);
+
typedef enum { S2N_SERVER, S2N_CLIENT } s2n_mode;
S2N_API
extern struct s2n_connection *s2n_connection_new(s2n_mode mode);
@@ -266,8 +319,15 @@ S2N_API
extern void *s2n_connection_get_ctx(struct s2n_connection *conn);
typedef int s2n_client_hello_fn(struct s2n_connection *conn, void *ctx);
+typedef enum { S2N_CLIENT_HELLO_CB_BLOCKING, S2N_CLIENT_HELLO_CB_NONBLOCKING } s2n_client_hello_cb_mode;
S2N_API
extern int s2n_config_set_client_hello_cb(struct s2n_config *config, s2n_client_hello_fn client_hello_callback, void *ctx);
+S2N_API
+extern int s2n_config_set_client_hello_cb_mode(struct s2n_config *config, s2n_client_hello_cb_mode cb_mode);
+S2N_API
+extern int s2n_client_hello_cb_done(struct s2n_connection *conn);
+S2N_API
+extern int s2n_connection_server_name_extension_used(struct s2n_connection *conn);
struct s2n_client_hello;
S2N_API
@@ -288,6 +348,19 @@ S2N_API
extern ssize_t s2n_client_hello_get_extension_length(struct s2n_client_hello *ch, s2n_tls_extension_type extension_type);
S2N_API
extern ssize_t s2n_client_hello_get_extension_by_id(struct s2n_client_hello *ch, s2n_tls_extension_type extension_type, uint8_t *out, uint32_t max_length);
+/**
+ * Used to check if a particular extension exists in the client hello.
+ *
+ * @param ch A pointer to the client hello object
+ * @param extension_iana The iana value of the extension
+ * @param exists A pointer that will be set to whether or not the extension exists
+ */
+S2N_API
+extern int s2n_client_hello_has_extension(struct s2n_client_hello *ch, uint16_t extension_iana, bool *exists);
+S2N_API
+extern int s2n_client_hello_get_session_id_length(struct s2n_client_hello *ch, uint32_t *out_length);
+S2N_API
+extern int s2n_client_hello_get_session_id(struct s2n_client_hello *ch, uint8_t *out, uint32_t *out_length, uint32_t max_length);
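A hedged sketch of using these inspection helpers from inside a client hello callback; it assumes the pre-existing `s2n_connection_get_client_hello()` accessor and is not part of the upstream change.

```c
#include <stdbool.h>
#include <s2n.h>

/* 0x0000 is the IANA code point for the server_name (SNI) extension. */
static int my_client_hello_cb(struct s2n_connection *conn, void *ctx)
{
    (void) ctx;
    struct s2n_client_hello *ch = s2n_connection_get_client_hello(conn);
    bool has_sni = false;
    uint32_t session_id_len = 0;

    if (ch == NULL
        || s2n_client_hello_has_extension(ch, 0x0000, &has_sni) < 0
        || s2n_client_hello_get_session_id_length(ch, &session_id_len) < 0) {
        return -1;
    }
    /* ... pick a config based on has_sni / session_id_len ... */
    return 0;
}
```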
S2N_API
extern int s2n_connection_set_fd(struct s2n_connection *conn, int fd);
@@ -295,6 +368,25 @@ S2N_API
extern int s2n_connection_set_read_fd(struct s2n_connection *conn, int readfd);
S2N_API
extern int s2n_connection_set_write_fd(struct s2n_connection *conn, int writefd);
+
+/**
+ * Gets the assigned file descriptor for the read channel of an s2n connection.
+ *
+ * @param conn A pointer to the s2n connection
+ * @param readfd pointer to place the used file descriptor.
+ */
+S2N_API
+extern int s2n_connection_get_read_fd(struct s2n_connection *conn, int *readfd);
+
+/**
+ * Gets the assigned file descriptor for the write channel of an s2n connection.
+ *
+ * @param conn A pointer to the s2n connection
+ * @param writefd pointer to place the used file descriptor.
+ */
+S2N_API
+extern int s2n_connection_get_write_fd(struct s2n_connection *conn, int *writefd);
+
S2N_API
extern int s2n_connection_use_corked_io(struct s2n_connection *conn);
@@ -328,6 +420,19 @@ extern uint64_t s2n_connection_get_delay(struct s2n_connection *conn);
S2N_API
extern int s2n_connection_set_cipher_preferences(struct s2n_connection *conn, const char *version);
+
+/**
+ * Appends the provided application protocol to the preference list
+ *
+ * The data provided in `protocol` parameter will be copied into an internal buffer
+ *
+ * @param conn The connection object being updated
+ * @param protocol A pointer to a slice of bytes
+ * @param protocol_len The length of bytes that should be read from `protocol`. Note: this value cannot be 0, otherwise an error will be returned.
+ */
+S2N_API
+extern int s2n_connection_append_protocol_preference(struct s2n_connection *conn, const uint8_t *protocol, uint8_t protocol_len);
+
S2N_API
extern int s2n_connection_set_protocol_preferences(struct s2n_connection *conn, const char * const *protocols, int protocol_count);
S2N_API
@@ -341,7 +446,13 @@ extern const uint8_t *s2n_connection_get_ocsp_response(struct s2n_connection *co
S2N_API
extern const uint8_t *s2n_connection_get_sct_list(struct s2n_connection *conn, uint32_t *length);
-typedef enum { S2N_NOT_BLOCKED = 0, S2N_BLOCKED_ON_READ, S2N_BLOCKED_ON_WRITE, S2N_BLOCKED_ON_APPLICATION_INPUT } s2n_blocked_status;
+typedef enum {
+ S2N_NOT_BLOCKED = 0,
+ S2N_BLOCKED_ON_READ,
+ S2N_BLOCKED_ON_WRITE,
+ S2N_BLOCKED_ON_APPLICATION_INPUT,
+ S2N_BLOCKED_ON_EARLY_DATA,
+} s2n_blocked_status;
S2N_API
extern int s2n_negotiate(struct s2n_connection *conn, s2n_blocked_status *blocked);
S2N_API
@@ -379,6 +490,111 @@ extern int s2n_connection_set_client_auth_type(struct s2n_connection *conn, s2n_
S2N_API
extern int s2n_connection_get_client_cert_chain(struct s2n_connection *conn, uint8_t **der_cert_chain_out, uint32_t *cert_chain_len);
+/**
+ * Sets the initial number of session tickets to send after a >=TLS1.3 handshake. The default value is one ticket.
+ *
+ * @param config A pointer to the config object.
+ * @param num The number of session tickets that will be sent.
+ */
+S2N_API
+extern int s2n_config_set_initial_ticket_count(struct s2n_config *config, uint8_t num);
+
+/**
+ * Increases the number of session tickets to send after a >=TLS1.3 handshake.
+ *
+ * @param conn A pointer to the connection object.
+ * @param num The number of additional session tickets to send.
+ */
+S2N_API
+extern int s2n_connection_add_new_tickets_to_send(struct s2n_connection *conn, uint8_t num);
+
+/**
+ * Returns the number of session tickets issued by the server.
+ *
+ * In TLS1.3, this number can be up to the limit configured by s2n_config_set_initial_ticket_count
+ * and s2n_connection_add_new_tickets_to_send. In earlier versions of TLS, this number will be either 0 or 1.
+ *
+ * This method only works for server connections.
+ *
+ * @param conn A pointer to the connection object.
+ * @param num The number of additional session tickets sent.
+ */
+S2N_API
+extern int s2n_connection_get_tickets_sent(struct s2n_connection *conn, uint16_t *num);
+
+/**
+ * Sets the keying material lifetime for >=TLS1.3 session tickets so that one session doesn't get re-used ad infinitum.
+ * The default value is one week.
+ *
+ * @param conn A pointer to the connection object.
+ * @param lifetime_in_secs Lifetime of keying material in seconds.
+ */
+S2N_API
+extern int s2n_connection_set_server_keying_material_lifetime(struct s2n_connection *conn, uint32_t lifetime_in_secs);
+
+struct s2n_session_ticket;
+
+/**
+ * Callback function for receiving a session ticket.
+ *
+ * # Safety
+ *
+ * `ctx` is a void pointer and the caller is responsible for ensuring it is cast to the correct type.
+ * `ticket` is valid only within the scope of this callback.
+ *
+ * @param conn A pointer to the connection object.
+ * @param ctx Context for the session ticket callback function.
+ * @param ticket Pointer to the received session ticket object.
+ */
+typedef int (*s2n_session_ticket_fn)(struct s2n_connection *conn, void *ctx, struct s2n_session_ticket *ticket);
+
+/**
+ * Sets a session ticket callback to be called when a client receives a new session ticket.
+ *
+ * # Safety
+ *
+ * `callback` MUST cast `ctx` into the same type of pointer that was originally created.
+ * `ctx` MUST be valid for the lifetime of the config, or until a different context is set.
+ *
+ * @param config A pointer to the config object.
+ * @param callback The function that should be called when the callback is triggered.
+ * @param ctx The context to be passed when the callback is called.
+ */
+S2N_API
+extern int s2n_config_set_session_ticket_cb(struct s2n_config *config, s2n_session_ticket_fn callback, void *ctx);
+
+/**
+ * Gets the length of the session ticket from a session ticket object.
+ *
+ * @param ticket Pointer to the session ticket object.
+ * @param data_len Pointer to be set to the length of the session ticket on success.
+ */
+S2N_API
+extern int s2n_session_ticket_get_data_len(struct s2n_session_ticket *ticket, size_t *data_len);
+
+/**
+ * Gets the session ticket data from a session ticket object.
+ *
+ * # Safety
+ * The entire session ticket will be copied into `data` on success. Therefore, `data` MUST have enough
+ * memory to store the session ticket data.
+ *
+ * @param ticket Pointer to the session ticket object.
+ * @param max_data_len Maximum length of data that can be written to the 'data' pointer.
+ * @param data Pointer to where the session ticket data will be stored.
+ */
+S2N_API
+extern int s2n_session_ticket_get_data(struct s2n_session_ticket *ticket, size_t max_data_len, uint8_t *data);
+
+/**
+ * Gets the lifetime in seconds of the session ticket from a session ticket object.
+ *
+ * @param ticket Pointer to the session ticket object.
+ * @param session_lifetime Pointer to a variable where the lifetime of the session ticket will be stored.
+ */
+S2N_API
+extern int s2n_session_ticket_get_lifetime(struct s2n_session_ticket *ticket, uint32_t *session_lifetime);
+
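For illustration (not upstream code), a client-side ticket callback built on these accessors might look like the following; it would be registered with `s2n_config_set_session_ticket_cb()`, and how the copied ticket is cached is left to the application.

```c
#include <stdlib.h>
#include <s2n.h>

static int on_session_ticket(struct s2n_connection *conn, void *ctx, struct s2n_session_ticket *ticket)
{
    (void) conn;
    (void) ctx;

    size_t len = 0;
    uint32_t lifetime = 0;
    if (s2n_session_ticket_get_data_len(ticket, &len) < 0
        || s2n_session_ticket_get_lifetime(ticket, &lifetime) < 0) {
        return -1;
    }
    if (len == 0) { return 0; }

    uint8_t *data = malloc(len);
    if (data == NULL || s2n_session_ticket_get_data(ticket, len, data) < 0) {
        free(data);
        return -1;
    }
    /* ... store (data, len) plus lifetime in the application's ticket cache ... */
    free(data);
    return 0;
}
```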
S2N_API
extern int s2n_connection_set_session(struct s2n_connection *conn, const uint8_t *session, size_t length);
S2N_API
@@ -396,9 +612,430 @@ extern int s2n_connection_is_session_resumed(struct s2n_connection *conn);
S2N_API
extern int s2n_connection_is_ocsp_stapled(struct s2n_connection *conn);
+/* TLS Signature Algorithms - RFC 5246 7.4.1.4.1 */
+/* https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-16 */
+typedef enum {
+ S2N_TLS_SIGNATURE_ANONYMOUS = 0,
+ S2N_TLS_SIGNATURE_RSA = 1,
+ S2N_TLS_SIGNATURE_ECDSA = 3,
+
+ /* Use Private Range for RSA PSS since it's not defined there */
+ S2N_TLS_SIGNATURE_RSA_PSS_RSAE = 224,
+ S2N_TLS_SIGNATURE_RSA_PSS_PSS
+} s2n_tls_signature_algorithm;
+
+/* TLS Hash Algorithm - https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 */
+/* https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-18 */
+typedef enum {
+ S2N_TLS_HASH_NONE = 0,
+ S2N_TLS_HASH_MD5 = 1,
+ S2N_TLS_HASH_SHA1 = 2,
+ S2N_TLS_HASH_SHA224 = 3,
+ S2N_TLS_HASH_SHA256 = 4,
+ S2N_TLS_HASH_SHA384 = 5,
+ S2N_TLS_HASH_SHA512 = 6,
+
+ /* Use Private Range for MD5_SHA1 */
+ S2N_TLS_HASH_MD5_SHA1 = 224
+} s2n_tls_hash_algorithm;
+
+S2N_API
+extern int s2n_connection_get_selected_signature_algorithm(struct s2n_connection *conn, s2n_tls_signature_algorithm *chosen_alg);
+S2N_API
+extern int s2n_connection_get_selected_digest_algorithm(struct s2n_connection *conn, s2n_tls_hash_algorithm *chosen_alg);
+S2N_API
+extern int s2n_connection_get_selected_client_cert_signature_algorithm(struct s2n_connection *conn, s2n_tls_signature_algorithm *chosen_alg);
+S2N_API
+extern int s2n_connection_get_selected_client_cert_digest_algorithm(struct s2n_connection *conn, s2n_tls_hash_algorithm *chosen_alg);
+
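An illustrative fragment (not part of the diff) querying the negotiated algorithms once the handshake has completed:

```c
#include <s2n.h>

static int log_selected_algorithms(struct s2n_connection *conn)
{
    s2n_tls_signature_algorithm sig_alg = S2N_TLS_SIGNATURE_ANONYMOUS;
    s2n_tls_hash_algorithm hash_alg = S2N_TLS_HASH_NONE;

    if (s2n_connection_get_selected_signature_algorithm(conn, &sig_alg) < 0
        || s2n_connection_get_selected_digest_algorithm(conn, &hash_alg) < 0) {
        return -1;
    }
    /* ... log or assert on sig_alg / hash_alg ... */
    return 0;
}
```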
S2N_API
extern struct s2n_cert_chain_and_key *s2n_connection_get_selected_cert(struct s2n_connection *conn);
+/**
+ * Returns the length of the s2n certificate chain `chain_and_key`.
+ *
+ * @param chain_and_key A pointer to the s2n_cert_chain_and_key object being read.
+ * @param cert_length This return value represents the length of the s2n certificate chain `chain_and_key`.
+ */
+S2N_API
+extern int s2n_cert_chain_get_length(const struct s2n_cert_chain_and_key *chain_and_key, uint32_t *cert_length);
+
+/**
+ * Returns the certificate `out_cert` present at the index `cert_idx` of the certificate chain `chain_and_key`.
+ *
+ * Note that the index of the leaf certificate is zero. If the certificate chain `chain_and_key` is NULL or the
+ * certificate index value is not in the acceptable range for the input certificate chain, an error is returned.
+ *
+ * # Safety
+ *
+ * There is no memory allocation required for `out_cert` buffer prior to calling the `s2n_cert_chain_get_cert` API.
+ * The `out_cert` will contain the pointer to the s2n_cert initialized within the input s2n_cert_chain_and_key `chain_and_key`.
+ * The pointer to the output s2n certificate `out_cert` is valid until `chain_and_key` is freed up.
+ * If a caller wishes to persist the `out_cert` beyond the lifetime of `chain_and_key`, the contents would need to be
+ * copied prior to freeing `chain_and_key`.
+ *
+ * @param chain_and_key A pointer to the s2n_cert_chain_and_key object being read.
+ * @param out_cert A pointer to the output s2n_cert `out_cert` present at the index `cert_idx` of the certificate chain `chain_and_key`.
+ * @param cert_idx The certificate index for the requested certificate within the s2n certificate chain.
+ */
+S2N_API
+extern int s2n_cert_chain_get_cert(const struct s2n_cert_chain_and_key *chain_and_key, struct s2n_cert **out_cert, const uint32_t cert_idx);
+
+/**
+ * Returns the s2n certificate in DER format along with its length.
+ *
+ * The API gets the s2n certificate `cert` in DER format. The certificate is returned in the `out_cert_der` buffer.
+ * Here, `cert_len` represents the length of the certificate.
+ *
+ * A caller can use certificate parsing tools such as the ones provided by OpenSSL to parse the DER encoded certificate chain returned.
+ *
+ * # Safety
+ *
+ * The memory for the `out_cert_der` buffer is allocated and owned by s2n-tls.
+ * Since the size of the certificate can potentially be very large, a pointer to internal connection data is returned instead of
+ * copying the contents into a caller-provided buffer.
+ *
+ * The pointer to the output buffer `out_cert_der` is valid only while the connection exists.
+ * The `s2n_connection_free` API frees the memory associated with the out_cert_der buffer, and after the `s2n_connection_wipe` API is
+ * called the memory pointed to by out_cert_der is invalid.
+ *
+ * If a caller wishes to persist the `out_cert_der` beyond the lifetime of the connection, the contents would need to be
+ * copied prior to the connection termination.
+ *
+ * @param cert A pointer to the s2n_cert object being read.
+ * @param out_cert_der A pointer to the output buffer which will hold the s2n certificate `cert` in DER format.
+ * @param cert_length This return value represents the length of the certificate.
+ */
+S2N_API
+extern int s2n_cert_get_der(const struct s2n_cert *cert, const uint8_t **out_cert_der, uint32_t *cert_length);
+
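A sketch (not from the diff) that walks a loaded chain with the three accessors above; note the DER pointers borrow from the chain object, so copy them if they must outlive it.

```c
#include <s2n.h>

static int dump_chain(const struct s2n_cert_chain_and_key *chain)
{
    uint32_t count = 0;
    if (s2n_cert_chain_get_length(chain, &count) < 0) { return -1; }

    for (uint32_t i = 0; i < count; i++) {
        struct s2n_cert *cert = NULL;
        const uint8_t *der = NULL;
        uint32_t der_len = 0;

        if (s2n_cert_chain_get_cert(chain, &cert, i) < 0
            || s2n_cert_get_der(cert, &der, &der_len) < 0) {
            return -1;
        }
        /* ... hand (der, der_len) to an X.509 parser; index 0 is the leaf ... */
    }
    return 0;
}
```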
+/**
+ * Returns the validated peer certificate chain as a `s2n_cert_chain_and_key` opaque object.
+ *
+ * The `s2n_cert_chain_and_key` parameter must be allocated by the caller using the `s2n_cert_chain_and_key_new` API
+ * prior to this function call and must be empty. To free the memory associated with the `s2n_cert_chain_and_key` object use the
+ * `s2n_cert_chain_and_key_free` API.
+ *
+ * @param conn A pointer to the s2n_connection object being read.
+ * @param s2n_cert_chain_and_key The returned validated peer certificate chain `cert_chain` retrieved from the s2n connection.
+ */
+S2N_API
+extern int s2n_connection_get_peer_cert_chain(const struct s2n_connection *conn, struct s2n_cert_chain_and_key *cert_chain);
+
+/**
+ * Returns the length of the DER encoded extension value of the ASN.1 X.509 certificate extension.
+ *
+ * @param cert A pointer to the s2n_cert object being read.
+ * @param oid A null-terminated cstring that contains the OID of the X.509 certificate extension to be read.
+ * @param ext_value_len This return value contains the length of DER encoded extension value of the ASN.1 X.509 certificate extension.
+ */
+S2N_API
+extern int s2n_cert_get_x509_extension_value_length(struct s2n_cert *cert, const uint8_t *oid, uint32_t *ext_value_len);
+
+/**
+ * Returns the DER encoding of an ASN.1 X.509 certificate extension value, its length, and a boolean critical.
+ *
+ * @param cert A pointer to the s2n_cert object being read.
+ * @param oid A null-terminated cstring that contains the OID of the X.509 certificate extension to be read.
+ * @param ext_value A pointer to the output buffer which will hold the DER encoding of an ASN.1 X.509 certificate extension value returned.
+ * @param ext_value_len This value is both an input and output parameter and represents the length of the output buffer `ext_value`.
+ * When used as an input parameter, the caller must use this parameter to convey the maximum length of `ext_value`.
+ * When used as an output parameter, `ext_value_len` holds the actual length of the DER encoding of the ASN.1 X.509 certificate extension value returned.
+ * @param critical This return value contains the boolean value for `critical`.
+ */
+S2N_API
+extern int s2n_cert_get_x509_extension_value(struct s2n_cert *cert, const uint8_t *oid, uint8_t *ext_value, uint32_t *ext_value_len, bool *critical);
+
+/**
+ * Returns the UTF8 String length of the ASN.1 X.509 certificate extension data.
+ *
+ * @param extension_data A pointer to the DER encoded ASN.1 X.509 certificate extension value being read.
+ * @param extension_len represents the length of the input buffer `extension_data`.
+ * @param utf8_str_len This return value contains the UTF8 String length of the ASN.1 X.509 certificate extension data.
+ */
+S2N_API
+extern int s2n_cert_get_utf8_string_from_extension_data_length(const uint8_t *extension_data, uint32_t extension_len, uint32_t *utf8_str_len);
+
+/**
+ * Returns the UTF8 String representation of the DER encoded ASN.1 X.509 certificate extension data.
+ *
+ * @param extension_data A pointer to the DER encoded ASN.1 X.509 certificate extension value being read.
+ * @param extension_len represents the length of the input buffer `extension_data`.
+ * @param out_data A pointer to the output buffer which will hold the UTF8 String representation of the DER encoded ASN.1 X.509
+ * certificate extension data returned.
+ * @param out_len This value is both an input and output parameter and represents the length of the output buffer `out_data`.
+ * When used as an input parameter, the caller must use this parameter to convey the maximum length of `out_data`.
+ * When used as an output parameter, `out_len` holds the actual length of UTF8 String returned.
+ */
+S2N_API
+extern int s2n_cert_get_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t extension_len, uint8_t *out_data, uint32_t *out_len);
+
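The extension accessors follow a length-then-value calling pattern; here is a hypothetical sketch (the subjectAltName OID is used purely as an example, and this code is not part of the upstream change).

```c
#include <stdbool.h>
#include <stdlib.h>
#include <s2n.h>

static int read_extension(struct s2n_cert *cert)
{
    const uint8_t oid[] = "2.5.29.17"; /* subjectAltName, as an example */
    uint32_t len = 0;
    bool critical = false;

    if (s2n_cert_get_x509_extension_value_length(cert, oid, &len) < 0) { return -1; }
    if (len == 0) { return 0; }

    uint8_t *value = malloc(len);
    if (value == NULL) { return -1; }

    /* `len` is in/out: it passes the buffer size in and returns the bytes written. */
    if (s2n_cert_get_x509_extension_value(cert, oid, value, &len, &critical) < 0) {
        free(value);
        return -1;
    }
    /* ... value/len now hold the DER-encoded extension value ... */
    free(value);
    return 0;
}
```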
+/* Pre-shared key (PSK) Hash Algorithm - RFC 8446 Section-2.2 */
+typedef enum {
+ S2N_PSK_HMAC_SHA256,
+ S2N_PSK_HMAC_SHA384,
+} s2n_psk_hmac;
+
+struct s2n_psk;
+
+/**
+ * Creates a new s2n external pre-shared key (PSK) object with `S2N_PSK_HMAC_SHA256` as the default
+ * PSK hash algorithm. An external PSK is a key established outside of TLS using a secure mutually agreed upon mechanism.
+ *
+ * Use `s2n_psk_free` to free the memory allocated to the s2n external PSK object created by this API.
+ *
+ * @return struct s2n_psk* Returns a pointer to the newly created external PSK object.
+ */
+S2N_API
+struct s2n_psk* s2n_external_psk_new(void);
+
+/**
+ * Frees the memory associated with the external PSK object.
+ *
+ * @param psk Pointer to the PSK object to be freed.
+ */
+S2N_API
+int s2n_psk_free(struct s2n_psk **psk);
+
+/**
+ * Sets the identity for a given external PSK object.
+ * The identity is a unique identifier for the pre-shared secret.
+ * It is a non-secret value represented by raw bytes.
+ *
+ * # Safety
+ *
+ * The identity is transmitted over the network unencrypted and is a non-secret value.
+ * Do not include confidential information in the identity.
+ *
+ * Note that the identity is copied into s2n-tls memory and the caller is responsible for
+ * freeing the memory associated with the identity input.
+ *
+ * @param psk A pointer to a PSK object to be updated with the identity.
+ * @param identity The identity in raw bytes format to be copied.
+ * @param identity_size The length of the PSK identity being set.
+ */
+S2N_API
+int s2n_psk_set_identity(struct s2n_psk *psk, const uint8_t *identity, uint16_t identity_size);
+
+/**
+ * Sets the out-of-band/externally provisioned secret for a given external PSK object.
+ *
+ * # Safety
+ *
+ * Note that the secret is copied into s2n-tls memory and the caller is responsible for
+ * freeing the memory associated with the `secret` input.
+ *
+ * Deriving a shared secret from a password or other low-entropy source
+ * is not secure and is subject to dictionary attacks.
+ * See https://tools.ietf.org/rfc/rfc8446#section-2.2 for more information.
+ *
+ * @param psk A pointer to a PSK object to be updated with the secret.
+ * @param secret The secret in raw bytes format to be copied.
+ * @param secret_size The length of the pre-shared secret being set.
+ */
+S2N_API
+int s2n_psk_set_secret(struct s2n_psk *psk, const uint8_t *secret, uint16_t secret_size);
+
+/**
+ * Sets the hash algorithm for a given external PSK object. The supported PSK hash
+ * algorithms are listed in the enum `s2n_psk_hmac` above.
+ *
+ * @param psk A pointer to the external PSK object to be updated with the PSK hash algorithm.
+ * @param hmac The PSK hash algorithm being set.
+ */
+S2N_API
+int s2n_psk_set_hmac(struct s2n_psk *psk, s2n_psk_hmac hmac);
+
+/**
+ * Appends a PSK object to the list of PSKs supported by the s2n connection.
+ * If a PSK with a duplicate identity is found, an error is returned and the PSK is not added to the list.
+ * Note that a copy of `psk` is stored on the connection. The user is still responsible for freeing the
+ * memory associated with `psk`.
+ *
+ * @param conn A pointer to the s2n_connection object that contains the list of PSKs supported.
+ * @param psk A pointer to the `s2n_psk` object to be appended to the list of PSKs on the s2n connection.
+ */
+S2N_API
+int s2n_connection_append_psk(struct s2n_connection *conn, struct s2n_psk *psk);
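
As a worked example of the external PSK calls above, the editorial sketch below creates one PSK, configures it, and appends it to a connection. The identity and secret bytes are placeholders; a real secret must come from a secure, high-entropy source.

#include <s2n.h>

/* Sketch: build an external PSK and attach it to a connection. */
static int append_example_psk(struct s2n_connection *conn)
{
    const uint8_t identity[] = "example-client";            /* placeholder, sent in the clear */
    const uint8_t secret[] = { 0x4a, 0x95, 0xd3, 0x2b };    /* placeholder secret bytes */

    struct s2n_psk *psk = s2n_external_psk_new();
    if (psk == NULL) {
        return S2N_FAILURE;
    }
    if (s2n_psk_set_identity(psk, identity, sizeof(identity) - 1) != S2N_SUCCESS
            || s2n_psk_set_secret(psk, secret, sizeof(secret)) != S2N_SUCCESS
            || s2n_psk_set_hmac(psk, S2N_PSK_HMAC_SHA384) != S2N_SUCCESS
            || s2n_connection_append_psk(conn, psk) != S2N_SUCCESS) {
        s2n_psk_free(&psk);
        return S2N_FAILURE;
    }
    /* The connection stores a copy, so the local PSK object can be freed now. */
    return s2n_psk_free(&psk);
}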
+
+/**
+ * The list of PSK modes supported by s2n-tls for TLS versions >= TLS1.3.
+ * Currently s2n-tls supports two modes - `S2N_PSK_MODE_RESUMPTION`, which represents the PSKs established
+ * using the previous connection via session resumption, and `S2N_PSK_MODE_EXTERNAL`, which represents PSKs
+ * established out-of-band/externally using a secure mutually agreed upon mechanism.
+ */
+typedef enum {
+ S2N_PSK_MODE_RESUMPTION,
+ S2N_PSK_MODE_EXTERNAL
+} s2n_psk_mode;
+
+/**
+ * Sets the PSK mode on the s2n config object.
+ * The supported PSK modes are listed in the enum `s2n_psk_mode` above.
+ *
+ * @param config A pointer to the s2n_config object being updated.
+ * @param mode The PSK mode to be set.
+ */
+S2N_API
+int s2n_config_set_psk_mode(struct s2n_config *config, s2n_psk_mode mode);
+
+/**
+ * Sets the PSK mode on the s2n connection object.
+ * The supported PSK modes are listed in the enum `s2n_psk_mode` above.
+ * This API overrides the PSK mode set on config for this connection.
+ *
+ * @param conn A pointer to the s2n_connection object being updated.
+ * @param mode The PSK mode to be set.
+ */
+S2N_API
+int s2n_connection_set_psk_mode(struct s2n_connection *conn, s2n_psk_mode mode);
+
+/**
+ * Gets the negotiated PSK identity length from the s2n connection object. The negotiated PSK
+ * refers to the chosen PSK by the server to be used for the connection.
+ *
+ * This API can be used to determine whether a negotiated PSK exists. If a negotiated PSK exists, a
+ * call to this API returns a value greater than zero. If the negotiated PSK does not exist, the
+ * value `0` is returned.
+ *
+ * @param conn A pointer to the s2n_connection object that successfully negotiated a PSK connection.
+ * @param identity_length The length of the negotiated PSK identity.
+ */
+S2N_API
+int s2n_connection_get_negotiated_psk_identity_length(struct s2n_connection *conn, uint16_t *identity_length);
+
+/**
+ * Gets the negotiated PSK identity from the s2n connection object.
+ * If the negotiated PSK does not exist, the PSK identity will not be obtained and no error will be returned.
+ * Prior to this API call, use `s2n_connection_get_negotiated_psk_identity_length` to determine if a
+ * negotiated PSK exists or not.
+ *
+ * # Safety
+ *
+ * The negotiated PSK identity will be copied into the identity buffer on success.
+ * Therefore, the identity buffer must have enough memory to fit the identity length.
+ *
+ * @param conn A pointer to the s2n_connection object.
+ * @param identity The negotiated PSK identity obtained from the s2n_connection object.
+ * @param max_identity_length The maximum length for the PSK identity. If the negotiated PSK identity length is
+ * greater than this `max_identity_length` value, an error will be returned.
+ */
+S2N_API
+int s2n_connection_get_negotiated_psk_identity(struct s2n_connection *conn, uint8_t *identity, uint16_t max_identity_length);
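
Used together, the two calls above follow the usual length-then-value pattern. An editorial sketch, assuming <stdlib.h> and abbreviated error handling:

uint16_t identity_len = 0;
if (s2n_connection_get_negotiated_psk_identity_length(conn, &identity_len) != S2N_SUCCESS) {
    /* handle error */
}
if (identity_len > 0) {
    uint8_t *identity = malloc(identity_len);
    if (identity != NULL
            && s2n_connection_get_negotiated_psk_identity(conn, identity, identity_len) == S2N_SUCCESS) {
        /* identity holds identity_len bytes of the negotiated PSK identity. */
    }
    free(identity);
}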
+
+struct s2n_offered_psk;
+
+/**
+ * Creates a new s2n offered PSK object.
+ * An offered PSK object represents a single PSK sent by the client.
+ *
+ * # Safety
+ *
+ * Use `s2n_offered_psk_free` to free the memory allocated to the s2n offered PSK object created by this API.
+ *
+ * @return struct s2n_offered_psk* Returns a pointer to the newly created offered PSK object.
+ */
+S2N_API
+struct s2n_offered_psk* s2n_offered_psk_new(void);
+
+/**
+ * Frees the memory associated with the `s2n_offered_psk` object.
+ *
+ * @param psk A pointer to the `s2n_offered_psk` object to be freed.
+ */
+S2N_API
+int s2n_offered_psk_free(struct s2n_offered_psk **psk);
+
+/**
+ * Gets the PSK identity and PSK identity length for a given offered PSK object.
+ *
+ * @param psk A pointer to the offered PSK object being read.
+ * @param identity The PSK identity being obtained.
+ * @param size The length of the PSK identity being obtained.
+ */
+S2N_API
+int s2n_offered_psk_get_identity(struct s2n_offered_psk *psk, uint8_t** identity, uint16_t *size);
+
+struct s2n_offered_psk_list;
+
+/**
+ * Checks whether the offered PSK list has another offered PSK object remaining.
+ * An offered PSK list contains all the PSKs offered by the client for the server to select.
+ *
+ * # Safety
+ *
+ * This API returns a pointer to the s2n-tls internal memory with limited lifetime.
+ * After the completion of `s2n_psk_selection_callback` this pointer is invalid.
+ *
+ * @param psk_list A pointer to the offered PSK list being read.
+ * @return bool A boolean value indicating whether another offered PSK object remains in the offered PSK list.
+ */
+S2N_API
+bool s2n_offered_psk_list_has_next(struct s2n_offered_psk_list *psk_list);
+
+/**
+ * Obtains the next offered PSK object from the list of offered PSKs. Use `s2n_offered_psk_list_has_next`
+ * prior to this API call to ensure we have not reached the end of the list.
+ *
+ * @param psk_list A pointer to the offered PSK list being read.
+ * @param psk A pointer to the next offered PSK object being obtained.
+ */
+S2N_API
+int s2n_offered_psk_list_next(struct s2n_offered_psk_list *psk_list, struct s2n_offered_psk *psk);
+
+/**
+ * Returns the offered PSK list to its original read state.
+ *
+ * When `s2n_offered_psk_list_reread` is called, `s2n_offered_psk_list_next` will return the first PSK
+ * in the offered PSK list.
+ *
+ * @param psk_list A pointer to the offered PSK list being reread.
+ */
+S2N_API
+int s2n_offered_psk_list_reread(struct s2n_offered_psk_list *psk_list);
+
+/**
+ * Chooses a PSK from the offered PSK list to be used for the connection.
+ * This API matches the PSK identity received from the client against the server's known PSK identities
+ * list, in order to choose the PSK to be used for the connection. If the PSK identity sent from the client
+ * is NULL, no PSK is chosen for the connection. If the client's offered PSK identity does not match any of the
+ * server's known PSK identities, an error will be returned. Use this API along with the `s2n_psk_selection_callback`
+ * callback to select a PSK identity.
+ *
+ * @param psk_list A pointer to the server's known PSK list used to compare for a matching PSK with the client.
+ * @param psk A pointer to the client's PSK object used to compare with the server's known PSK identities.
+ */
+S2N_API int s2n_offered_psk_list_choose_psk(struct s2n_offered_psk_list *psk_list, struct s2n_offered_psk *psk);
+
+/**
+ * Callback function to select a PSK from a list of offered PSKs.
+ * Use this callback to implement custom PSK selection logic. The s2n-tls default PSK selection logic
+ * chooses the first matching PSK from the list of offered PSKs sent by the client.
+ *
+ * # Safety
+ *
+ * `context` is a void pointer and the caller is responsible for ensuring it is cast to the correct type.
+ * After the completion of this callback, the pointer to `psk_list` is invalid.
+ *
+ * @param conn A pointer to the s2n_connection object.
+ * @param context A pointer to a context for the caller to pass state to the callback, if needed.
+ * @param psk_list A pointer to the offered PSK list being read.
+ */
+typedef int (*s2n_psk_selection_callback)(struct s2n_connection *conn, void *context,
+ struct s2n_offered_psk_list *psk_list);
+
+/**
+ * Sets the callback to select the matching PSK.
+ * If this callback is not set, s2n-tls uses default PSK selection logic that selects the first matching
+ * server PSK.
+ *
+ * @param config A pointer to the s2n_config object.
+ * @param cb The function that should be called when the callback is triggered.
+ * @param context A pointer to a context for the caller to pass state to the callback, if needed.
+ */
+S2N_API
+int s2n_config_set_psk_selection_callback(struct s2n_config *config, s2n_psk_selection_callback cb, void *context);
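
Tying the offered-PSK APIs above together, here is an editorial sketch of a selection callback. `app_psk_table_contains` is a hypothetical application helper, not an s2n-tls function; the callback would be registered via s2n_config_set_psk_selection_callback(config, example_psk_selection, NULL).

#include <s2n.h>

/* Hypothetical application-side identity lookup; not part of s2n-tls. */
extern bool app_psk_table_contains(const uint8_t *identity, uint16_t identity_len);

static int example_psk_selection(struct s2n_connection *conn, void *context,
                                 struct s2n_offered_psk_list *psk_list)
{
    (void) conn;
    (void) context;

    struct s2n_offered_psk *offered = s2n_offered_psk_new();
    if (offered == NULL) {
        return S2N_FAILURE;
    }
    while (s2n_offered_psk_list_has_next(psk_list)) {
        uint8_t *identity = NULL;
        uint16_t identity_len = 0;
        if (s2n_offered_psk_list_next(psk_list, offered) != S2N_SUCCESS
                || s2n_offered_psk_get_identity(offered, &identity, &identity_len) != S2N_SUCCESS) {
            break;
        }
        if (app_psk_table_contains(identity, identity_len)) {
            int result = s2n_offered_psk_list_choose_psk(psk_list, offered);
            s2n_offered_psk_free(&offered);
            return result;
        }
    }
    /* No matching identity found; no PSK is chosen here. */
    s2n_offered_psk_free(&offered);
    return S2N_SUCCESS;
}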
+
S2N_API
extern uint64_t s2n_connection_get_wire_bytes_in(struct s2n_connection *conn);
S2N_API
@@ -415,6 +1052,25 @@ S2N_API
extern int s2n_connection_client_cert_used(struct s2n_connection *conn);
S2N_API
extern const char *s2n_connection_get_cipher(struct s2n_connection *conn);
+
+/**
+ * Returns the IANA value for the connection's negotiated cipher suite.
+ *
+ * The value is returned in the form of `first,second`, in order to closely match
+ * the values defined in the [IANA Registry](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ * For example if the connection's negotiated cipher suite is `TLS_AES_128_GCM_SHA256`,
+ * which is registered as `0x13,0x01`, then `first = 0x13` and `second = 0x01`.
+ *
+ * This method will only succeed after the cipher suite has been negotiated with the peer.
+ *
+ * @param conn A pointer to the connection being read
+ * @param first A pointer to a single byte, which will be updated with the first byte in the registered IANA value.
+ * @param second A pointer to a single byte, which will be updated with the second byte in the registered IANA value.
+ * @return A POSIX error signal. If an error was returned, the values contained in `first` and `second` should be considered invalid.
+ */
+S2N_API
+extern int s2n_connection_get_cipher_iana_value(struct s2n_connection *conn, uint8_t *first, uint8_t *second);
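
For example, after negotiation the following editorial sketch checks whether the cipher suite mentioned above, TLS_AES_128_GCM_SHA256 (0x13,0x01), was selected:

uint8_t first = 0, second = 0;
if (s2n_connection_get_cipher_iana_value(conn, &first, &second) == S2N_SUCCESS
        && first == 0x13 && second == 0x01) {
    /* The negotiated cipher suite is TLS_AES_128_GCM_SHA256. */
}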
+
S2N_API
extern int s2n_connection_is_valid_for_cipher_preferences(struct s2n_connection *conn, const char *version);
S2N_API
@@ -431,17 +1087,177 @@ S2N_API
extern const char *s2n_connection_get_last_message_name(struct s2n_connection *conn);
struct s2n_async_pkey_op;
+typedef enum { S2N_ASYNC_PKEY_VALIDATION_FAST, S2N_ASYNC_PKEY_VALIDATION_STRICT } s2n_async_pkey_validation_mode;
+typedef enum { S2N_ASYNC_DECRYPT, S2N_ASYNC_SIGN } s2n_async_pkey_op_type;
+/**
+ * Callback function for handling private key operations
+ *
+ * Invoked every time an operation requiring the private key is encountered
+ * during the handshake.
+ *
+ * # Safety
+ * * `op` is owned by the application and MUST be freed.
+ *
+ * @param conn Connection which triggered the callback
+ * @param op An opaque object representing the private key operation
+ */
typedef int (*s2n_async_pkey_fn)(struct s2n_connection *conn, struct s2n_async_pkey_op *op);
+
+/**
+ * Sets up the callback to invoke when private key operations occur.
+ *
+ * @param config Config to set the callback
+ * @param fn The function that should be called for each private key operation
+ */
S2N_API
extern int s2n_config_set_async_pkey_callback(struct s2n_config *config, s2n_async_pkey_fn fn);
+
+/**
+ * Performs a private key operation using the given private key.
+ *
+ * # Safety
+ * * Can only be called once. Any subsequent calls will produce a `S2N_ERR_T_USAGE` error.
+ * * Safe to call from inside s2n_async_pkey_fn
+ * * Safe to call from a different thread, as long as no other thread is operating on `op`.
+ *
+ * @param op An opaque object representing the private key operation
+ * @param key The private key used for the operation. It can be extracted from
+ * `conn` through the `s2n_connection_get_selected_cert` and `s2n_cert_chain_and_key_get_private_key` calls
+ */
S2N_API
extern int s2n_async_pkey_op_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key *key);
+
+/**
+ * Finalizes a private key operation and unblocks the connection.
+ *
+ * # Safety
+ * * `conn` must match the connection that originally triggered the callback.
+ * * Must be called after the operation is performed.
+ * * Can only be called once. Any subsequent calls will produce a `S2N_ERR_T_USAGE` error.
+ * * Safe to call from inside s2n_async_pkey_fn
+ * * Safe to call from a different thread, as long as no other thread is operating on `op`.
+ *
+ * @param op An opaque object representing the private key operation
+ * @param conn The connection associated with the operation that should be unblocked
+ */
S2N_API
extern int s2n_async_pkey_op_apply(struct s2n_async_pkey_op *op, struct s2n_connection *conn);
+
+/**
+ * Frees the opaque structure representing a private key operation.
+ *
+ * # Safety
+ * * MUST be called for every operation passed to s2n_async_pkey_fn
+ * * Safe to call before or after the connection that created the operation is freed
+ *
+ * @param op An opaque object representing the private key operation
+ */
S2N_API
extern int s2n_async_pkey_op_free(struct s2n_async_pkey_op *op);
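
As a minimal illustration of the perform/apply/free sequence above, the editorial sketch below implements s2n_async_pkey_fn synchronously with the connection's own key. It assumes s2n_connection_get_selected_cert and s2n_cert_chain_and_key_get_private_key are available from this header; an asynchronous design would instead stash `op`, return immediately, and perform/apply it later from another thread.

static int example_pkey_callback(struct s2n_connection *conn, struct s2n_async_pkey_op *op)
{
    struct s2n_cert_chain_and_key *chain = s2n_connection_get_selected_cert(conn);
    if (chain == NULL) {
        s2n_async_pkey_op_free(op);
        return S2N_FAILURE;
    }
    s2n_cert_private_key *pkey = s2n_cert_chain_and_key_get_private_key(chain);
    if (pkey == NULL
            || s2n_async_pkey_op_perform(op, pkey) != S2N_SUCCESS
            || s2n_async_pkey_op_apply(op, conn) != S2N_SUCCESS) {
        s2n_async_pkey_op_free(op);
        return S2N_FAILURE;
    }
    /* The operation must always be freed, even after a successful apply. */
    return s2n_async_pkey_op_free(op);
}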
+/**
+ * Configures whether or not s2n-tls will perform potentially expensive validation of
+ * the results of a private key operation.
+ *
+ * @param config Config to set the validation mode for
+ * @param mode What level of validation to perform
+ */
+S2N_API
+extern int s2n_config_set_async_pkey_validation_mode(struct s2n_config *config, s2n_async_pkey_validation_mode mode);
+
+/**
+ * Returns the type of the private key operation.
+ *
+ * @param op An opaque object representing the private key operation
+ * @param type A pointer to be set to the type
+ */
+S2N_API
+extern int s2n_async_pkey_op_get_op_type(struct s2n_async_pkey_op *op, s2n_async_pkey_op_type *type);
+
+/**
+ * Returns the size of the input to the private key operation.
+ *
+ * @param op An opaque object representing the private key operation
+ * @param data_len A pointer to be set to the size
+ */
+S2N_API
+extern int s2n_async_pkey_op_get_input_size(struct s2n_async_pkey_op *op, uint32_t *data_len);
+
+/**
+ * Returns the input to the private key operation.
+ *
+ * When signing, the input is the digest to sign.
+ * When decrypting, the input is the data to decrypt.
+ *
+ * # Safety
+ * * `data` must be sufficiently large to contain the input.
+ * `s2n_async_pkey_op_get_input_size` can be called to determine how much memory is required.
+ * * s2n-tls does not take ownership of `data`.
+ * The application still owns the memory and must free it if necessary.
+ *
+ * @param op An opaque object representing the private key operation
+ * @param data A pointer to a buffer to copy the input into
+ * @param data_len The maximum size of the `data` buffer
+ */
+S2N_API
+extern int s2n_async_pkey_op_get_input(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len);
+
+/**
+ * Sets the output of the private key operation.
+ *
+ * # Safety
+ * * s2n-tls does not take ownership of `data`.
+ * The application still owns the memory and must free it if necessary.
+ *
+ * @param op An opaque object representing the private key operation
+ * @param data A pointer to a buffer containing the output
+ * @param data_len The size of the `data` buffer
+ */
+S2N_API
+extern int s2n_async_pkey_op_set_output(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len);
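
For the offload path, the input/output accessors above let an application hand the raw operation to an external signer or decrypter. The sketch below is editorial; `app_external_sign_or_decrypt` is a hypothetical application function, the 512-byte output buffer is a placeholder, and error handling is abbreviated.

#include <stdlib.h>

/* Hypothetical external signer/decrypter; not part of s2n-tls.
 * Expected to update *out_len to the number of bytes produced. */
extern int app_external_sign_or_decrypt(s2n_async_pkey_op_type type,
                                        const uint8_t *in, uint32_t in_len,
                                        uint8_t *out, uint32_t *out_len);

static int example_offload(struct s2n_connection *conn, struct s2n_async_pkey_op *op)
{
    s2n_async_pkey_op_type type;
    uint32_t in_len = 0;
    uint8_t *input = NULL;
    uint8_t output[512] = { 0 };    /* placeholder output size */
    uint32_t out_len = sizeof(output);
    int result = S2N_FAILURE;

    if (s2n_async_pkey_op_get_op_type(op, &type) == S2N_SUCCESS
            && s2n_async_pkey_op_get_input_size(op, &in_len) == S2N_SUCCESS
            && (input = malloc(in_len)) != NULL
            && s2n_async_pkey_op_get_input(op, input, in_len) == S2N_SUCCESS
            && app_external_sign_or_decrypt(type, input, in_len, output, &out_len) == 0
            && s2n_async_pkey_op_set_output(op, output, out_len) == S2N_SUCCESS
            && s2n_async_pkey_op_apply(op, conn) == S2N_SUCCESS) {
        result = S2N_SUCCESS;
    }
    free(input);
    s2n_async_pkey_op_free(op);
    return result;
}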
+
+/**
+ * Callback function for handling key log events
+ *
+ * THIS SHOULD BE USED FOR DEBUGGING PURPOSES ONLY!
+ *
+ * Each log line is formatted with the
+ * [NSS Key Log Format](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format)
+ * without a newline.
+ *
+ * # Safety
+ *
+ * * `ctx` MUST be cast into the same type of pointer that was originally created
+ * * `logline` bytes MUST be copied or discarded before this function returns
+ *
+ * @param ctx Context for the callback
+ * @param conn Connection for which the log line is being emitted
+ * @param logline Pointer to the log line data
+ * @param len Length of the log line data
+ */
+typedef int (*s2n_key_log_fn)(void *ctx, struct s2n_connection *conn, uint8_t *logline, size_t len);
+
+/**
+ * Sets a key logging callback on the provided config
+ *
+ * THIS SHOULD BE USED FOR DEBUGGING PURPOSES ONLY!
+ *
+ * Setting this function enables configurations to emit secrets in the
+ * [NSS Key Log Format](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format)
+ *
+ * # Safety
+ *
+ * * `callback` MUST cast `ctx` into the same type of pointer that was originally created
+ * * `ctx` MUST live for at least as long as it is set on the config
+ *
+ * @param config Config to set the callback
+ * @param callback The function that should be called for each secret log entry
+ * @param ctx The context to be passed when the callback is called
+ */
+S2N_API
+extern int s2n_config_set_key_log_cb(struct s2n_config *config, s2n_key_log_fn callback, void *ctx);
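
A common use of the key log callback above is appending each line to a keylog file that tools such as Wireshark can consume. An editorial sketch, for debugging only, where `ctx` is assumed to be the FILE pointer passed at registration time:

#include <stdio.h>

static int example_key_log(void *ctx, struct s2n_connection *conn, uint8_t *logline, size_t len)
{
    (void) conn;
    FILE *keylog = (FILE *) ctx;
    /* The line arrives without a trailing newline, so add one. */
    fwrite(logline, 1, len, keylog);
    fputc('\n', keylog);
    fflush(keylog);
    return S2N_SUCCESS;
}

/* Registration, e.g.: s2n_config_set_key_log_cb(config, example_key_log, keylog_file); */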
+
/* s2n_config_enable_cert_req_dss_legacy_compat adds a dss cert type in the server certificate request when being called.
* It only sends the dss cert type in the cert request but does not succeed the handshake if a dss cert is received.
* Please DO NOT call this api unless you know you actually need legacy DSS certificate type compatibility
@@ -449,6 +1265,242 @@ extern int s2n_async_pkey_op_free(struct s2n_async_pkey_op *op);
S2N_API
extern int s2n_config_enable_cert_req_dss_legacy_compat(struct s2n_config *config);
+/**
+ * Sets the maximum bytes of early data the server will accept.
+ *
+ * The default maximum is 0. If the maximum is 0, the server rejects all early data requests.
+ * The config maximum can be overridden by the connection maximum or the maximum on an external pre-shared key.
+ *
+ * @param config A pointer to the config
+ * @param max_early_data_size The maximum early data that the server will accept
+ * @return A POSIX error signal. If successful, the maximum early data size was updated.
+ */
+S2N_API int s2n_config_set_server_max_early_data_size(struct s2n_config *config, uint32_t max_early_data_size);
+
+/**
+ * Sets the maximum bytes of early data the server will accept.
+ *
+ * The default maximum is 0. If the maximum is 0, the server rejects all early data requests.
+ * The connection maximum can be overridden by the maximum on an external pre-shared key.
+ *
+ * @param conn A pointer to the connection
+ * @param max_early_data_size The maximum early data the server will accept
+ * @return A POSIX error signal. If successful, the maximum early data size was updated.
+ */
+S2N_API int s2n_connection_set_server_max_early_data_size(struct s2n_connection *conn, uint32_t max_early_data_size);
+
+/**
+ * Sets the user context associated with early data on a server.
+ *
+ * This context is passed to the `s2n_early_data_cb` callback to help decide whether to accept or reject early data.
+ *
+ * Unlike most contexts, the early data context is a byte buffer instead of a void pointer.
+ * This is because we need to serialize the context into session tickets.
+ *
+ * This API is intended for use with session resumption, and will not affect pre-shared keys.
+ *
+ * @param conn A pointer to the connection
+ * @param context A pointer to the user context data. This data will be copied.
+ * @param context_size The size of the data to read from the `context` pointer.
+ * @return A POSIX error signal. If successful, the context was updated.
+ */
+S2N_API int s2n_connection_set_server_early_data_context(struct s2n_connection *conn, const uint8_t *context, uint16_t context_size);
+
+/**
+ * Configures a particular pre-shared key to allow early data.
+ *
+ * `max_early_data_size` must be set to the maximum early data accepted by the server.
+ *
+ * In order to use early data, the cipher suite set on the pre-shared key must match the cipher suite
+ * ultimately negotiated by the TLS handshake. Additionally, the cipher suite must have the same
+ * hmac algorithm as the pre-shared key.
+ *
+ * @param psk A pointer to the pre-shared key, created with `s2n_external_psk_new`.
+ * @param max_early_data_size The maximum early data that can be sent or received using this key.
+ * @param cipher_suite_first_byte The first byte in the registered IANA value of the associated cipher suite.
+ * @param cipher_suite_second_byte The second byte in the registered IANA value of the associated cipher suite.
+ * @return A POSIX error signal. If successful, `psk` was updated.
+ */
+S2N_API int s2n_psk_configure_early_data(struct s2n_psk *psk, uint32_t max_early_data_size,
+ uint8_t cipher_suite_first_byte, uint8_t cipher_suite_second_byte);
+
+/**
+ * Sets the optional `application_protocol` associated with the given pre-shared key.
+ *
+ * In order to use early data, the `application_protocol` set on the pre-shared key must match
+ * the `application_protocol` ultimately negotiated by the TLS handshake.
+ *
+ * @param psk A pointer to the pre-shared key, created with `s2n_external_psk_new`.
+ * @param application_protocol A pointer to the associated application protocol data. This data will be copied.
+ * @param size The size of the data to read from the `application_protocol` pointer.
+ * @return A POSIX error signal. If successful, the application protocol was set.
+ */
+S2N_API int s2n_psk_set_application_protocol(struct s2n_psk *psk, const uint8_t *application_protocol, uint8_t size);
+
+/**
+ * Sets the optional user early data context associated with the given pre-shared key.
+ *
+ * The early data context is passed to the `s2n_early_data_cb` callback to help decide whether
+ * to accept or reject early data.
+ *
+ * @param psk A pointer to the pre-shared key, created with `s2n_external_psk_new`.
+ * @param context A pointer to the associated user context data. This data will be copied.
+ * @param size The size of the data to read from the `context` pointer.
+ * @return A POSIX error signal. If successful, the context was set.
+ */
+S2N_API int s2n_psk_set_early_data_context(struct s2n_psk *psk, const uint8_t *context, uint16_t size);
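
Building on the external PSK sketch earlier, the following editorial snippet enables early data on a PSK. The 1024-byte limit, the "http/1.1" protocol, and the context byte are placeholders; 0x13,0x01 is the registered IANA value for TLS_AES_128_GCM_SHA256.

const uint8_t app_proto[] = "http/1.1";    /* placeholder application protocol */
const uint8_t early_ctx[] = { 0x01 };      /* placeholder user context */

if (s2n_psk_configure_early_data(psk, 1024, 0x13, 0x01) != S2N_SUCCESS
        || s2n_psk_set_application_protocol(psk, app_proto, sizeof(app_proto) - 1) != S2N_SUCCESS
        || s2n_psk_set_early_data_context(psk, early_ctx, sizeof(early_ctx)) != S2N_SUCCESS) {
    /* handle error */
}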
+
+/* The status of early data on a connection.
+ *
+ * S2N_EARLY_DATA_STATUS_OK: Early data is in progress.
+ * S2N_EARLY_DATA_STATUS_NOT_REQUESTED: The client did not request early data, so none was sent or received.
+ * S2N_EARLY_DATA_STATUS_REJECTED: The client requested early data, but the server rejected the request.
+ * Early data may have been sent, but was not received.
+ * S2N_EARLY_DATA_STATUS_END: All early data was successfully sent and received.
+ */
+typedef enum {
+ S2N_EARLY_DATA_STATUS_OK,
+ S2N_EARLY_DATA_STATUS_NOT_REQUESTED,
+ S2N_EARLY_DATA_STATUS_REJECTED,
+ S2N_EARLY_DATA_STATUS_END,
+} s2n_early_data_status_t;
+
+/**
+ * Reports the current state of early data for a connection.
+ *
+ * See `s2n_early_data_status_t` for all possible states.
+ *
+ * @param conn A pointer to the connection
+ * @param status A pointer which will be set to the current early data status
+ * @return A POSIX error signal.
+ */
+S2N_API int s2n_connection_get_early_data_status(struct s2n_connection *conn, s2n_early_data_status_t *status);
+
+/**
+ * Reports the remaining size of the early data allowed by a connection.
+ *
+ * If early data was rejected or not requested, the remaining early data size is 0.
+ * Otherwise, the remaining early data size is the maximum early data allowed by the connection,
+ * minus the early data sent or received so far.
+ *
+ * @param conn A pointer to the connection
+ * @param allowed_early_data_size A pointer which will be set to the remaining early data currently allowed by `conn`
+ * @return A POSIX error signal.
+ */
+S2N_API int s2n_connection_get_remaining_early_data_size(struct s2n_connection *conn, uint32_t *allowed_early_data_size);
+
+/**
+ * Reports the maximum size of the early data allowed by a connection.
+ *
+ * This is the maximum amount of early data that can ever be sent and received for a connection.
+ * It is not affected by the actual status of the early data, so it can be non-zero even if early data
+ * is rejected or not requested.
+ *
+ * @param conn A pointer to the connection
+ * @param max_early_data_size A pointer which will be set to the maximum early data allowed by `conn`
+ * @return A POSIX error signal.
+ */
+S2N_API int s2n_connection_get_max_early_data_size(struct s2n_connection *conn, uint32_t *max_early_data_size);
+
+/**
+ * Called by the client to begin negotiation and send early data.
+ *
+ * See https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#using-early-data--0rtt
+ * for usage and examples. DO NOT USE unless you have considered the security issues and
+ * implemented mitigation for anti-replay attacks.
+ *
+ * @param conn A pointer to the connection
+ * @param data A pointer to the early data to be sent
+ * @param data_len The size of the early data to send
+ * @param data_sent A pointer which will be set to the size of the early data sent
+ * @param blocked A pointer which will be set to the blocked status, as in `s2n_negotiate`.
+ * @return A POSIX error signal. The error should be handled as in `s2n_negotiate`.
+ */
+S2N_API int s2n_send_early_data(struct s2n_connection *conn, const uint8_t *data, ssize_t data_len,
+ ssize_t *data_sent, s2n_blocked_status *blocked);
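
A client-side editorial sketch of the call above; the request bytes are placeholders, and blocked statuses must be handled the same way as for s2n_negotiate:

const uint8_t request[] = "placeholder early data";
ssize_t bytes_sent = 0;
s2n_blocked_status blocked = S2N_NOT_BLOCKED;

if (s2n_send_early_data(conn, request, sizeof(request) - 1, &bytes_sent, &blocked) != S2N_SUCCESS) {
    /* retry while blocked on I/O, otherwise treat as a handshake failure */
}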
+
+/**
+ * Called by the server to begin negotiation and accept any early data the client sends.
+ *
+ * See https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#using-early-data--0rtt
+ * for usage and examples. DO NOT USE unless you have considered the security issues and
+ * implemented mitigation for anti-replay attacks.
+ *
+ * @param conn A pointer to the connection
+ * @param data A pointer to a buffer to store the early data received
+ * @param max_data_len The size of the early data buffer
+ * @param data_received A pointer which will be set to the size of the early data received
+ * @param blocked A pointer which will be set to the blocked status, as in `s2n_negotiate`.
+ * @return A POSIX error signal. The error should be handled as in `s2n_negotiate`.
+ */
+S2N_API int s2n_recv_early_data(struct s2n_connection *conn, uint8_t *data, ssize_t max_data_len,
+ ssize_t *data_received, s2n_blocked_status *blocked);
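
And the server-side counterpart, again as an editorial sketch with a placeholder buffer size:

uint8_t early_buf[1024] = { 0 };    /* placeholder buffer size */
ssize_t bytes_received = 0;
s2n_blocked_status blocked = S2N_NOT_BLOCKED;

if (s2n_recv_early_data(conn, early_buf, sizeof(early_buf), &bytes_received, &blocked) == S2N_SUCCESS) {
    /* bytes_received bytes of early data (possibly zero) are now in early_buf. */
}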
+
+struct s2n_offered_early_data;
+
+/**
+ * A callback which can be implemented to accept or reject early data.
+ *
+ * This callback is triggered only after the server has determined early data is otherwise acceptable according
+ * to the TLS early data specification. Implementations therefore only need to cover application-specific checks,
+ * not the standard TLS early data validation.
+ *
+ * This callback can be synchronous or asynchronous. For asynchronous behavior, return success without
+ * calling `s2n_offered_early_data_reject` or `s2n_offered_early_data_accept`. `early_data` will
+ * still be a valid reference, and the connection will block until `s2n_offered_early_data_reject` or
+ * `s2n_offered_early_data_accept` is called.
+ *
+ * @param conn A pointer to the connection
+ * @param early_data A pointer which can be used to access information about the proposed early data
+ * and then accept or reject it.
+ * @return A POSIX error signal. If unsuccessful, the connection will be closed with an error.
+ */
+typedef int (*s2n_early_data_cb)(struct s2n_connection *conn, struct s2n_offered_early_data *early_data);
+
+/**
+ * Set a callback to accept or reject early data.
+ *
+ * @param config A pointer to the config object
+ * @param cb A pointer to the implementation of the callback.
+ * @return A POSIX error signal. If successful, the callback was set.
+ */
+S2N_API int s2n_config_set_early_data_cb(struct s2n_config *config, s2n_early_data_cb cb);
+
+/**
+ * Get the length of the early data context set by the user.
+ *
+ * @param early_data A pointer to the early data information
+ * @param context_len A pointer which will be set to the length of the user context
+ * @return A POSIX error signal.
+ */
+S2N_API int s2n_offered_early_data_get_context_length(struct s2n_offered_early_data *early_data, uint16_t *context_len);
+
+/**
+ * Get the early data context set by the user.
+ *
+ * @param early_data A pointer to the early data information
+ * @param context A byte buffer to copy the user context into
+ * @param max_len The size of `context`. Must be >= the result of `s2n_offered_early_data_get_context_length`.
+ * @return A POSIX error signal.
+ */
+S2N_API int s2n_offered_early_data_get_context(struct s2n_offered_early_data *early_data, uint8_t *context, uint16_t max_len);
+
+/**
+ * Reject early data offered by the client.
+ *
+ * @param early_data A pointer to the early data information
+ * @return A POSIX error signal. If successful, the client's early data will be rejected.
+ */
+S2N_API int s2n_offered_early_data_reject(struct s2n_offered_early_data *early_data);
+
+/**
+ * Accept early data offered by the client.
+ *
+ * @param early_data A pointer to the early data information
+ * @return A POSIX error signal. If successful, the client's early data will be accepted.
+ */
+S2N_API int s2n_offered_early_data_accept(struct s2n_offered_early_data *early_data);
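
Putting the early data callback APIs together, the editorial sketch below accepts early data only when the user context matches an expected application value. `expected_ctx` and the 32-byte buffer are placeholders; the callback would be registered with s2n_config_set_early_data_cb(config, example_early_data_cb).

#include <string.h>

static int example_early_data_cb(struct s2n_connection *conn, struct s2n_offered_early_data *early_data)
{
    (void) conn;
    const uint8_t expected_ctx[] = { 0x01 };    /* placeholder application value */
    uint8_t ctx[32] = { 0 };
    uint16_t ctx_len = 0;

    if (s2n_offered_early_data_get_context_length(early_data, &ctx_len) != S2N_SUCCESS) {
        return S2N_FAILURE;
    }
    if (ctx_len != sizeof(expected_ctx) || ctx_len > sizeof(ctx)
            || s2n_offered_early_data_get_context(early_data, ctx, sizeof(ctx)) != S2N_SUCCESS
            || memcmp(ctx, expected_ctx, ctx_len) != 0) {
        return s2n_offered_early_data_reject(early_data);
    }
    return s2n_offered_early_data_accept(early_data);
}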
+
#ifdef __cplusplus
}
#endif
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_aes_gcm.c b/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_aes_gcm.c
index 55418362d0..172736ac1e 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_aes_gcm.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_aes_gcm.c
@@ -49,22 +49,22 @@ static uint8_t s2n_aead_cipher_aes256_gcm_available()
static int s2n_aead_cipher_aes_gcm_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- notnull_check(in);
- notnull_check(out);
- notnull_check(iv);
- notnull_check(key);
- notnull_check(aad);
+ POSIX_ENSURE_REF(in);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(iv);
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(aad);
/* The size of the |in| blob includes the size of the data and the size of the AES-GCM tag */
- gte_check(in->size, S2N_TLS_GCM_TAG_LEN);
- gte_check(out->size, in->size);
- eq_check(iv->size, S2N_TLS_GCM_IV_LEN);
+ POSIX_ENSURE_GTE(in->size, S2N_TLS_GCM_TAG_LEN);
+ POSIX_ENSURE_GTE(out->size, in->size);
+ POSIX_ENSURE_EQ(iv->size, S2N_TLS_GCM_IV_LEN);
/* Adjust input length to account for the Tag length */
size_t in_len = in->size - S2N_TLS_GCM_TAG_LEN;
size_t out_len = 0;
- GUARD_OSSL(EVP_AEAD_CTX_seal(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in_len, aad->data, aad->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_seal(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in_len, aad->data, aad->size), S2N_ERR_ENCRYPT);
S2N_ERROR_IF((in_len + S2N_TLS_GCM_TAG_LEN) != out_len, S2N_ERR_ENCRYPT);
@@ -73,19 +73,19 @@ static int s2n_aead_cipher_aes_gcm_encrypt(struct s2n_session_key *key, struct s
static int s2n_aead_cipher_aes_gcm_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- notnull_check(in);
- notnull_check(out);
- notnull_check(iv);
- notnull_check(key);
- notnull_check(aad);
+ POSIX_ENSURE_REF(in);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(iv);
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(aad);
- gte_check(in->size, S2N_TLS_GCM_TAG_LEN);
- gte_check(out->size, in->size - S2N_TLS_GCM_TAG_LEN);
- eq_check(iv->size, S2N_TLS_GCM_IV_LEN);
+ POSIX_ENSURE_GTE(in->size, S2N_TLS_GCM_TAG_LEN);
+ POSIX_ENSURE_GTE(out->size, in->size - S2N_TLS_GCM_TAG_LEN);
+ POSIX_ENSURE_EQ(iv->size, S2N_TLS_GCM_IV_LEN);
size_t out_len = 0;
- GUARD_OSSL(EVP_AEAD_CTX_open(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in->size, aad->data, aad->size), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_open(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in->size, aad->data, aad->size), S2N_ERR_DECRYPT);
S2N_ERROR_IF((in->size - S2N_TLS_GCM_TAG_LEN) != out_len, S2N_ERR_ENCRYPT);
@@ -94,55 +94,103 @@ static int s2n_aead_cipher_aes_gcm_decrypt(struct s2n_session_key *key, struct s
static int s2n_aead_cipher_aes128_gcm_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- notnull_check(key);
- notnull_check(in);
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(in);
- eq_check(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
- GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm_tls12(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
return S2N_SUCCESS;
}
static int s2n_aead_cipher_aes256_gcm_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- notnull_check(key);
- notnull_check(in);
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(in);
- eq_check(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
- GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm_tls12(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
return S2N_SUCCESS;
}
static int s2n_aead_cipher_aes128_gcm_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- notnull_check(key);
- notnull_check(in);
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(in);
- eq_check(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
- GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm_tls12(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
return S2N_SUCCESS;
}
static int s2n_aead_cipher_aes256_gcm_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- notnull_check(key);
- notnull_check(in);
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(in);
- eq_check(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
- GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm_tls12(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_aead_cipher_aes128_gcm_set_encryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in)
+{
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(in);
+
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
+
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm_tls13(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_aead_cipher_aes256_gcm_set_encryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in)
+{
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(in);
+
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
+
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm_tls13(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_aead_cipher_aes128_gcm_set_decryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in)
+{
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(in);
+
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
+
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm_tls13(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_aead_cipher_aes256_gcm_set_decryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in)
+{
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(in);
+
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
+
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm_tls13(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
return S2N_SUCCESS;
}
static int s2n_aead_cipher_aes_gcm_init(struct s2n_session_key *key)
{
- notnull_check(key);
+ POSIX_ENSURE_REF(key);
EVP_AEAD_CTX_zero(key->evp_aead_ctx);
@@ -151,7 +199,7 @@ static int s2n_aead_cipher_aes_gcm_init(struct s2n_session_key *key)
static int s2n_aead_cipher_aes_gcm_destroy_key(struct s2n_session_key *key)
{
- notnull_check(key);
+ POSIX_ENSURE_REF(key);
EVP_AEAD_CTX_cleanup(key->evp_aead_ctx);
@@ -163,12 +211,12 @@ static int s2n_aead_cipher_aes_gcm_destroy_key(struct s2n_session_key *key)
static int s2n_aead_cipher_aes_gcm_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
/* The size of the |in| blob includes the size of the data and the size of the ChaCha20-Poly1305 tag */
- gte_check(in->size, S2N_TLS_GCM_TAG_LEN);
- gte_check(out->size, in->size);
- eq_check(iv->size, S2N_TLS_GCM_IV_LEN);
+ POSIX_ENSURE_GTE(in->size, S2N_TLS_GCM_TAG_LEN);
+ POSIX_ENSURE_GTE(out->size, in->size);
+ POSIX_ENSURE_EQ(iv->size, S2N_TLS_GCM_IV_LEN);
/* Initialize the IV */
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
/* Adjust input length and buffer pointer to account for the Tag length */
int in_len = in->size - S2N_TLS_GCM_TAG_LEN;
@@ -176,19 +224,19 @@ static int s2n_aead_cipher_aes_gcm_encrypt(struct s2n_session_key *key, struct s
int out_len;
/* Specify the AAD */
- GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_ENCRYPT);
/* Encrypt the data */
- GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &out_len, in->data, in_len), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &out_len, in->data, in_len), S2N_ERR_ENCRYPT);
/* When using AES-GCM, *out_len is the number of bytes written by EVP_EncryptUpdate. Since the tag is not written during this call, we do not take S2N_TLS_GCM_TAG_LEN into account */
S2N_ERROR_IF(in_len != out_len, S2N_ERR_ENCRYPT);
/* Finalize */
- GUARD_OSSL(EVP_EncryptFinal_ex(key->evp_cipher_ctx, out->data, &out_len), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptFinal_ex(key->evp_cipher_ctx, out->data, &out_len), S2N_ERR_ENCRYPT);
/* write the tag */
- GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_GET_TAG, S2N_TLS_GCM_TAG_LEN, tag_data), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_GET_TAG, S2N_TLS_GCM_TAG_LEN, tag_data), S2N_ERR_ENCRYPT);
/* When using AES-GCM, EVP_EncryptFinal_ex does not write any bytes. So, we should expect *out_len = 0. */
S2N_ERROR_IF(0 != out_len, S2N_ERR_ENCRYPT);
@@ -198,23 +246,23 @@ static int s2n_aead_cipher_aes_gcm_encrypt(struct s2n_session_key *key, struct s
static int s2n_aead_cipher_aes_gcm_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(in->size, S2N_TLS_GCM_TAG_LEN);
- gte_check(out->size, in->size);
- eq_check(iv->size, S2N_TLS_GCM_IV_LEN);
+ POSIX_ENSURE_GTE(in->size, S2N_TLS_GCM_TAG_LEN);
+ POSIX_ENSURE_GTE(out->size, in->size);
+ POSIX_ENSURE_EQ(iv->size, S2N_TLS_GCM_IV_LEN);
/* Initialize the IV */
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
/* Adjust input length and buffer pointer to account for the Tag length */
int in_len = in->size - S2N_TLS_GCM_TAG_LEN;
uint8_t *tag_data = in->data + in->size - S2N_TLS_GCM_TAG_LEN;
/* Set the TAG */
- GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_TAG, S2N_TLS_GCM_TAG_LEN, tag_data), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_TAG, S2N_TLS_GCM_TAG_LEN, tag_data), S2N_ERR_DECRYPT);
int out_len;
/* Specify the AAD */
- GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_DECRYPT);
int evp_decrypt_rc = 1;
/* Decrypt the data, but don't short circuit tag verification. EVP_Decrypt* return 0 on failure, 1 for success. */
@@ -232,52 +280,80 @@ static int s2n_aead_cipher_aes_gcm_decrypt(struct s2n_session_key *key, struct s
static int s2n_aead_cipher_aes128_gcm_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_IVLEN, S2N_TLS_GCM_IV_LEN, NULL);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return S2N_SUCCESS;
}
static int s2n_aead_cipher_aes256_gcm_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_IVLEN, S2N_TLS_GCM_IV_LEN, NULL);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return S2N_SUCCESS;
}
static int s2n_aead_cipher_aes128_gcm_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_IVLEN, S2N_TLS_GCM_IV_LEN, NULL);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return S2N_SUCCESS;
}
static int s2n_aead_cipher_aes256_gcm_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_IVLEN, S2N_TLS_GCM_IV_LEN, NULL);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_aead_cipher_aes128_gcm_set_encryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in)
+{
+ POSIX_GUARD(s2n_aead_cipher_aes128_gcm_set_encryption_key(key, in));
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_aead_cipher_aes256_gcm_set_encryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in)
+{
+ POSIX_GUARD(s2n_aead_cipher_aes256_gcm_set_encryption_key(key, in));
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_aead_cipher_aes128_gcm_set_decryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in)
+{
+ POSIX_GUARD(s2n_aead_cipher_aes128_gcm_set_decryption_key(key, in));
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_aead_cipher_aes256_gcm_set_decryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in)
+{
+ POSIX_GUARD(s2n_aead_cipher_aes256_gcm_set_decryption_key(key, in));
return S2N_SUCCESS;
}
@@ -342,8 +418,8 @@ struct s2n_cipher s2n_tls13_aes128_gcm = {
.encrypt = s2n_aead_cipher_aes_gcm_encrypt},
.is_available = s2n_aead_cipher_aes128_gcm_available,
.init = s2n_aead_cipher_aes_gcm_init,
- .set_encryption_key = s2n_aead_cipher_aes128_gcm_set_encryption_key,
- .set_decryption_key = s2n_aead_cipher_aes128_gcm_set_decryption_key,
+ .set_encryption_key = s2n_aead_cipher_aes128_gcm_set_encryption_key_tls13,
+ .set_decryption_key = s2n_aead_cipher_aes128_gcm_set_decryption_key_tls13,
.destroy_key = s2n_aead_cipher_aes_gcm_destroy_key,
};
@@ -358,7 +434,7 @@ struct s2n_cipher s2n_tls13_aes256_gcm = {
.encrypt = s2n_aead_cipher_aes_gcm_encrypt},
.is_available = s2n_aead_cipher_aes256_gcm_available,
.init = s2n_aead_cipher_aes_gcm_init,
- .set_encryption_key = s2n_aead_cipher_aes256_gcm_set_encryption_key,
- .set_decryption_key = s2n_aead_cipher_aes256_gcm_set_decryption_key,
+ .set_encryption_key = s2n_aead_cipher_aes256_gcm_set_encryption_key_tls13,
+ .set_decryption_key = s2n_aead_cipher_aes256_gcm_set_decryption_key_tls13,
.destroy_key = s2n_aead_cipher_aes_gcm_destroy_key,
};
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_chacha20_poly1305.c b/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_chacha20_poly1305.c
index 5c395a4b26..a4db7815f2 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_chacha20_poly1305.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_aead_cipher_chacha20_poly1305.c
@@ -49,13 +49,13 @@ static uint8_t s2n_aead_chacha20_poly1305_available(void)
static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
+ POSIX_ENSURE_GTE(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
/* The size of the |in| blob includes the size of the data and the size of the ChaCha20-Poly1305 tag */
- gte_check(out->size, in->size);
- eq_check(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN);
+ POSIX_ENSURE_GTE(out->size, in->size);
+ POSIX_ENSURE_EQ(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN);
/* Initialize the IV */
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
/* Adjust input length and buffer pointer to account for the Tag length */
int in_len = in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN;
@@ -63,19 +63,19 @@ static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struc
int out_len;
/* Specify the AAD */
- GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_ENCRYPT);
/* Encrypt the data */
- GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &out_len, in->data, in_len), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &out_len, in->data, in_len), S2N_ERR_ENCRYPT);
/* For OpenSSL 1.1.0 and 1.1.1, when using ChaCha20-Poly1305, *out_len is the number of bytes written by EVP_EncryptUpdate. Since the tag is not written during this call, we do not take S2N_TLS_CHACHA20_POLY1305_TAG_LEN into account */
S2N_ERROR_IF(in_len != out_len, S2N_ERR_ENCRYPT);
/* Finalize */
- GUARD_OSSL(EVP_EncryptFinal_ex(key->evp_cipher_ctx, out->data, &out_len), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptFinal_ex(key->evp_cipher_ctx, out->data, &out_len), S2N_ERR_ENCRYPT);
/* Write the tag */
- GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_GET_TAG, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, tag_data), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_GET_TAG, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, tag_data), S2N_ERR_ENCRYPT);
/* For OpenSSL 1.1.0 and 1.1.1, when using ChaCha20-Poly1305, EVP_EncryptFinal_ex does not write any bytes. So, we should expect *out_len = 0. */
S2N_ERROR_IF(0 != out_len, S2N_ERR_ENCRYPT);
@@ -85,23 +85,23 @@ static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struc
static int s2n_aead_chacha20_poly1305_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
- gte_check(out->size, in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
- eq_check(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN);
+ POSIX_ENSURE_GTE(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
+ POSIX_ENSURE_GTE(out->size, in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
+ POSIX_ENSURE_EQ(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN);
/* Initialize the IV */
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
/* Adjust input length and buffer pointer to account for the Tag length */
int in_len = in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN;
uint8_t *tag_data = in->data + in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN;
/* Set the TAG */
- GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_TAG, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, tag_data), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_TAG, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, tag_data), S2N_ERR_DECRYPT);
int out_len;
/* Specify the AAD */
- GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_DECRYPT);
int evp_decrypt_rc = 1;
/* Decrypt the data, but don't short circuit tag verification. EVP_Decrypt* return 0 on failure, 1 for success. */
@@ -119,26 +119,26 @@ static int s2n_aead_chacha20_poly1305_decrypt(struct s2n_session_key *key, struc
static int s2n_aead_chacha20_poly1305_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_chacha20_poly1305(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_chacha20_poly1305(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_SET_IVLEN, S2N_TLS_CHACHA20_POLY1305_IV_LEN, NULL);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
static int s2n_aead_chacha20_poly1305_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_chacha20_poly1305(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_chacha20_poly1305(), NULL, NULL, NULL), S2N_ERR_KEY_INIT);
EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_SET_IVLEN, S2N_TLS_CHACHA20_POLY1305_IV_LEN, NULL);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
@@ -161,16 +161,16 @@ static int s2n_aead_chacha20_poly1305_destroy_key(struct s2n_session_key *key)
static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
+ POSIX_ENSURE_GTE(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
/* The size of the |in| blob includes the size of the data and the size of the ChaCha20-Poly1305 tag */
- gte_check(out->size, in->size);
- eq_check(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN);
+ POSIX_ENSURE_GTE(out->size, in->size);
+ POSIX_ENSURE_EQ(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN);
/* Adjust input length to account for the Tag length */
size_t in_len = in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN;
size_t out_len = 0;
- GUARD_OSSL(EVP_AEAD_CTX_seal(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in_len, aad->data, aad->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_seal(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in_len, aad->data, aad->size), S2N_ERR_ENCRYPT);
S2N_ERROR_IF((in_len + S2N_TLS_CHACHA20_POLY1305_TAG_LEN) != out_len, S2N_ERR_ENCRYPT);
@@ -179,13 +179,13 @@ static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struc
static int s2n_aead_chacha20_poly1305_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
- gte_check(out->size, in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
- eq_check(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN);
+ POSIX_ENSURE_GTE(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
+ POSIX_ENSURE_GTE(out->size, in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN);
+ POSIX_ENSURE_EQ(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN);
size_t out_len = 0;
- GUARD_OSSL(EVP_AEAD_CTX_open(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in->size, aad->data, aad->size), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_open(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in->size, aad->data, aad->size), S2N_ERR_DECRYPT);
S2N_ERROR_IF((in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN) != out_len, S2N_ERR_ENCRYPT);
@@ -194,18 +194,18 @@ static int s2n_aead_chacha20_poly1305_decrypt(struct s2n_session_key *key, struc
static int s2n_aead_chacha20_poly1305_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN);
- GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_chacha20_poly1305(), in->data, in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_chacha20_poly1305(), in->data, in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
return 0;
}
static int s2n_aead_chacha20_poly1305_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN);
+ POSIX_ENSURE_EQ(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN);
- GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_chacha20_poly1305(), in->data, in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_chacha20_poly1305(), in->data, in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, NULL), S2N_ERR_KEY_INIT);
return 0;
}
@@ -228,32 +228,32 @@ static int s2n_aead_chacha20_poly1305_destroy_key(struct s2n_session_key *key)
static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- S2N_ERROR(S2N_ERR_ENCRYPT);
+ POSIX_BAIL(S2N_ERR_ENCRYPT);
}
static int s2n_aead_chacha20_poly1305_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out)
{
- S2N_ERROR(S2N_ERR_DECRYPT);
+ POSIX_BAIL(S2N_ERR_DECRYPT);
}
static int s2n_aead_chacha20_poly1305_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- S2N_ERROR(S2N_ERR_KEY_INIT);
+ POSIX_BAIL(S2N_ERR_KEY_INIT);
}
static int s2n_aead_chacha20_poly1305_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- S2N_ERROR(S2N_ERR_KEY_INIT);
+ POSIX_BAIL(S2N_ERR_KEY_INIT);
}
static int s2n_aead_chacha20_poly1305_init(struct s2n_session_key *key)
{
- S2N_ERROR(S2N_ERR_KEY_INIT);
+ POSIX_BAIL(S2N_ERR_KEY_INIT);
}
static int s2n_aead_chacha20_poly1305_destroy_key(struct s2n_session_key *key)
{
- S2N_ERROR(S2N_ERR_KEY_DESTROY);
+ POSIX_BAIL(S2N_ERR_KEY_DESTROY);
}
#endif
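
Throughout these hunks the old unprefixed safety macros (eq_check, gte_check, GUARD_OSSL, S2N_ERROR) are replaced by their POSIX_-prefixed equivalents. As a rough illustration only — the real definitions live in utils/s2n_safety.h and additionally record the error code and failure location — the pattern reduces to:

/* Hypothetical, simplified stand-ins for the real utils/s2n_safety.h macros;
 * the actual versions also set s2n_errno and capture the source location. */
#define POSIX_ENSURE_EQ(a, b)        do { if ((a) != (b)) { return -1; } } while (0)
#define POSIX_ENSURE_GTE(a, b)       do { if ((a) <  (b)) { return -1; } } while (0)
#define POSIX_GUARD_OSSL(call, err)  do { if ((call) != 1) { return -1; } } while (0)

/* Usage mirrors the migrated key-setup functions: validate sizes first,
 * then guard every libcrypto call that signals success by returning 1. */
static int example_set_key(struct s2n_session_key *key, struct s2n_blob *in)
{
    POSIX_ENSURE_EQ(in->size, 32);
    POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_gcm(),
                                        NULL, in->data, NULL), S2N_ERR_KEY_INIT);
    return 0;
}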
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_3des.c b/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_3des.c
index dcd190bd8a..9a3e440653 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_3des.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_3des.c
@@ -30,12 +30,12 @@ static uint8_t s2n_cbc_cipher_3des_available()
static int s2n_cbc_cipher_3des_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(out->size, in->size);
+ POSIX_ENSURE_GTE(out->size, in->size);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
int len = out->size;
- GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
S2N_ERROR_IF(len != in->size, S2N_ERR_ENCRYPT);
return 0;
@@ -43,32 +43,32 @@ static int s2n_cbc_cipher_3des_encrypt(struct s2n_session_key *key, struct s2n_b
static int s2n_cbc_cipher_3des_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(out->size, in->size);
+ POSIX_ENSURE_GTE(out->size, in->size);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
int len = out->size;
- GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_DECRYPT);
return 0;
}
static int s2n_cbc_cipher_3des_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 192 / 8);
+ POSIX_ENSURE_EQ(in->size, 192 / 8);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_des_ede3_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_des_ede3_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
static int s2n_cbc_cipher_3des_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 192 / 8);
+ POSIX_ENSURE_EQ(in->size, 192 / 8);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_des_ede3_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_des_ede3_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_aes.c b/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_aes.c
index 2a0fbaf66e..e737242ff0 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_aes.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_cbc_cipher_aes.c
@@ -35,12 +35,12 @@ static uint8_t s2n_cbc_cipher_aes256_available()
static int s2n_cbc_cipher_aes_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(out->size, in->size);
+ POSIX_ENSURE_GTE(out->size, in->size);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
int len = out->size;
- GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
S2N_ERROR_IF(len != in->size, S2N_ERR_ENCRYPT);
return 0;
@@ -48,52 +48,52 @@ static int s2n_cbc_cipher_aes_encrypt(struct s2n_session_key *key, struct s2n_bl
int s2n_cbc_cipher_aes_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(out->size, in->size);
+ POSIX_ENSURE_GTE(out->size, in->size);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
int len = out->size;
- GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_DECRYPT);
return 0;
}
int s2n_cbc_cipher_aes128_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 128 / 8);
+ POSIX_ENSURE_EQ(in->size, 128 / 8);
/* Always returns 1 */
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
static int s2n_cbc_cipher_aes128_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 128 / 8);
+ POSIX_ENSURE_EQ(in->size, 128 / 8);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
static int s2n_cbc_cipher_aes256_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 256 / 8);
+ POSIX_ENSURE_EQ(in->size, 256 / 8);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
int s2n_cbc_cipher_aes256_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 256 / 8);
+ POSIX_ENSURE_EQ(in->size, 256 / 8);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
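
The CBC cipher changes keep the same two-phase EVP flow: the key is bound to the context once in the set_*_key() helpers (with padding disabled, since TLS CBC records are padded by the caller), and each record supplies its IV through a second EVP_*Init_ex call immediately before EVP_*Update. A standalone sketch of that flow against the plain OpenSSL EVP API, unrelated to the s2n structs above and with error handling condensed:

#include <openssl/evp.h>

/* Sketch: AES-256-CBC with the key set once and the IV refreshed per record,
 * matching the split between set_encryption_key() and encrypt() above.
 * len must be a multiple of the AES block size (16) since padding is off. */
int cbc_example(const unsigned char key[32], const unsigned char iv[16],
                const unsigned char *in, int len, unsigned char *out)
{
    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    if (ctx == NULL) {
        return -1;
    }

    int ok = EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, key, NULL) == 1;  /* key only */
    EVP_CIPHER_CTX_set_padding(ctx, 0);  /* records are already padded by the caller */

    int out_len = 0;
    ok = ok && EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) == 1;   /* per-record IV */
    ok = ok && EVP_EncryptUpdate(ctx, out, &out_len, in, len) == 1;

    EVP_CIPHER_CTX_free(ctx);
    return (ok && out_len == len) ? 0 : -1;
}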
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_certificate.c b/contrib/restricted/aws/s2n/crypto/s2n_certificate.c
index 39646c3f28..f1a71a0332 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_certificate.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_certificate.c
@@ -17,13 +17,14 @@
# define _GNU_SOURCE
#endif
-#include <s2n.h>
+#include "api/s2n.h"
#include <openssl/x509v3.h>
#include <openssl/pem.h>
#include <string.h>
#include <strings.h>
#include "crypto/s2n_certificate.h"
+#include "crypto/s2n_openssl_x509.h"
#include "utils/s2n_array.h"
#include "utils/s2n_safety.h"
#include "utils/s2n_mem.h"
@@ -33,16 +34,16 @@
int s2n_cert_set_cert_type(struct s2n_cert *cert, s2n_pkey_type pkey_type)
{
- notnull_check(cert);
+ POSIX_ENSURE_REF(cert);
cert->pkey_type = pkey_type;
- GUARD(s2n_pkey_setup_for_type(&cert->public_key, pkey_type));
+ POSIX_GUARD(s2n_pkey_setup_for_type(&cert->public_key, pkey_type));
return 0;
}
int s2n_create_cert_chain_from_stuffer(struct s2n_cert_chain *cert_chain_out, struct s2n_stuffer *chain_in_stuffer)
{
DEFER_CLEANUP(struct s2n_stuffer cert_out_stuffer = {0}, s2n_stuffer_free);
- GUARD(s2n_stuffer_growable_alloc(&cert_out_stuffer, 2048));
+ POSIX_GUARD(s2n_stuffer_growable_alloc(&cert_out_stuffer, 2048));
struct s2n_cert **insert = &cert_chain_out->head;
uint32_t chain_size = 0;
@@ -51,20 +52,20 @@ int s2n_create_cert_chain_from_stuffer(struct s2n_cert_chain *cert_chain_out, st
if (s2n_stuffer_certificate_from_pem(chain_in_stuffer, &cert_out_stuffer) < 0) {
if (chain_size == 0) {
- S2N_ERROR(S2N_ERR_NO_CERTIFICATE_IN_PEM);
+ POSIX_BAIL(S2N_ERR_NO_CERTIFICATE_IN_PEM);
}
break;
}
struct s2n_blob mem = {0};
- GUARD(s2n_alloc(&mem, sizeof(struct s2n_cert)));
+ POSIX_GUARD(s2n_alloc(&mem, sizeof(struct s2n_cert)));
new_node = (struct s2n_cert *)(void *)mem.data;
if (s2n_alloc(&new_node->raw, s2n_stuffer_data_available(&cert_out_stuffer)) != S2N_SUCCESS) {
- GUARD(s2n_free(&mem));
+ POSIX_GUARD(s2n_free(&mem));
S2N_ERROR_PRESERVE_ERRNO();
}
if (s2n_stuffer_read(&cert_out_stuffer, &new_node->raw) != S2N_SUCCESS) {
- GUARD(s2n_free(&mem));
+ POSIX_GUARD(s2n_free(&mem));
S2N_ERROR_PRESERVE_ERRNO();
}
@@ -91,61 +92,93 @@ int s2n_cert_chain_and_key_set_cert_chain_from_stuffer(struct s2n_cert_chain_and
return s2n_create_cert_chain_from_stuffer(cert_and_key->cert_chain, chain_in_stuffer);
}
+int s2n_cert_chain_and_key_set_cert_chain_bytes(struct s2n_cert_chain_and_key *cert_and_key, uint8_t *cert_chain_pem, uint32_t cert_chain_len)
+{
+ DEFER_CLEANUP(struct s2n_stuffer chain_in_stuffer = {0}, s2n_stuffer_free);
+
+ POSIX_GUARD(s2n_stuffer_init_ro_from_string(&chain_in_stuffer, cert_chain_pem, cert_chain_len));
+ POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain_from_stuffer(cert_and_key, &chain_in_stuffer));
+
+ return S2N_SUCCESS;
+}
+
int s2n_cert_chain_and_key_set_cert_chain(struct s2n_cert_chain_and_key *cert_and_key, const char *cert_chain_pem)
{
- struct s2n_stuffer chain_in_stuffer = {0};
+ DEFER_CLEANUP(struct s2n_stuffer chain_in_stuffer = {0}, s2n_stuffer_free);
/* Turn the chain into a stuffer */
- GUARD(s2n_stuffer_alloc_ro_from_string(&chain_in_stuffer, cert_chain_pem));
- int rc = s2n_cert_chain_and_key_set_cert_chain_from_stuffer(cert_and_key, &chain_in_stuffer);
+ POSIX_GUARD(s2n_stuffer_alloc_ro_from_string(&chain_in_stuffer, cert_chain_pem));
+ POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain_from_stuffer(cert_and_key, &chain_in_stuffer));
- GUARD(s2n_stuffer_free(&chain_in_stuffer));
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_chain_and_key_set_private_key_from_stuffer(struct s2n_cert_chain_and_key *cert_and_key, struct s2n_stuffer *key_in_stuffer, struct s2n_stuffer *key_out_stuffer)
+{
+ struct s2n_blob key_blob = {0};
- return rc;
+ POSIX_GUARD(s2n_pkey_zero_init(cert_and_key->private_key));
+
+ /* Convert pem to asn1 and asn1 to the private key. Handles both PKCS#1 and PKCS#8 formats */
+ POSIX_GUARD(s2n_stuffer_private_key_from_pem(key_in_stuffer, key_out_stuffer));
+ key_blob.size = s2n_stuffer_data_available(key_out_stuffer);
+ key_blob.data = s2n_stuffer_raw_read(key_out_stuffer, key_blob.size);
+ POSIX_ENSURE_REF(key_blob.data);
+
+ /* Get key type and create appropriate key context */
+ POSIX_GUARD(s2n_asn1der_to_private_key(cert_and_key->private_key, &key_blob));
+
+ return S2N_SUCCESS;
}
-int s2n_cert_chain_and_key_set_private_key(struct s2n_cert_chain_and_key *cert_and_key, const char *private_key_pem)
+int s2n_cert_chain_and_key_set_private_key_bytes(struct s2n_cert_chain_and_key *cert_and_key, uint8_t *private_key_pem, uint32_t private_key_len)
{
DEFER_CLEANUP(struct s2n_stuffer key_in_stuffer = {0}, s2n_stuffer_free);
DEFER_CLEANUP(struct s2n_stuffer key_out_stuffer = {0}, s2n_stuffer_free);
- struct s2n_blob key_blob = {0};
-
- GUARD(s2n_pkey_zero_init(cert_and_key->private_key));
/* Put the private key pem in a stuffer */
- GUARD(s2n_stuffer_alloc_ro_from_string(&key_in_stuffer, private_key_pem));
- GUARD(s2n_stuffer_growable_alloc(&key_out_stuffer, strlen(private_key_pem)));
+ POSIX_GUARD(s2n_stuffer_init_ro_from_string(&key_in_stuffer, private_key_pem, private_key_len));
+ POSIX_GUARD(s2n_stuffer_growable_alloc(&key_out_stuffer, private_key_len));
- /* Convert pem to asn1 and asn1 to the private key. Handles both PKCS#1 and PKCS#8 formats */
- GUARD(s2n_stuffer_private_key_from_pem(&key_in_stuffer, &key_out_stuffer));
- key_blob.size = s2n_stuffer_data_available(&key_out_stuffer);
- key_blob.data = s2n_stuffer_raw_read(&key_out_stuffer, key_blob.size);
- notnull_check(key_blob.data);
+ POSIX_GUARD(s2n_cert_chain_and_key_set_private_key_from_stuffer(cert_and_key, &key_in_stuffer, &key_out_stuffer));
- /* Get key type and create appropriate key context */
- GUARD(s2n_asn1der_to_private_key(cert_and_key->private_key, &key_blob));
+ return S2N_SUCCESS;
+}
- return 0;
+int s2n_cert_chain_and_key_set_private_key(struct s2n_cert_chain_and_key *cert_and_key, const char *private_key_pem)
+{
+ POSIX_ENSURE_REF(private_key_pem);
+
+ DEFER_CLEANUP(struct s2n_stuffer key_in_stuffer = {0}, s2n_stuffer_free);
+ DEFER_CLEANUP(struct s2n_stuffer key_out_stuffer = {0}, s2n_stuffer_free);
+
+ /* Put the private key pem in a stuffer */
+ POSIX_GUARD(s2n_stuffer_alloc_ro_from_string(&key_in_stuffer, private_key_pem));
+ POSIX_GUARD(s2n_stuffer_growable_alloc(&key_out_stuffer, strlen(private_key_pem)));
+
+ POSIX_GUARD(s2n_cert_chain_and_key_set_private_key_from_stuffer(cert_and_key, &key_in_stuffer, &key_out_stuffer));
+
+ return S2N_SUCCESS;
}
int s2n_cert_chain_and_key_set_ocsp_data(struct s2n_cert_chain_and_key *chain_and_key, const uint8_t *data, uint32_t length)
{
- notnull_check(chain_and_key);
- GUARD(s2n_free(&chain_and_key->ocsp_status));
+ POSIX_ENSURE_REF(chain_and_key);
+ POSIX_GUARD(s2n_free(&chain_and_key->ocsp_status));
if (data && length) {
- GUARD(s2n_alloc(&chain_and_key->ocsp_status, length));
- memcpy_check(chain_and_key->ocsp_status.data, data, length);
+ POSIX_GUARD(s2n_alloc(&chain_and_key->ocsp_status, length));
+ POSIX_CHECKED_MEMCPY(chain_and_key->ocsp_status.data, data, length);
}
return 0;
}
int s2n_cert_chain_and_key_set_sct_list(struct s2n_cert_chain_and_key *chain_and_key, const uint8_t *data, uint32_t length)
{
- notnull_check(chain_and_key);
- GUARD(s2n_free(&chain_and_key->sct_list));
+ POSIX_ENSURE_REF(chain_and_key);
+ POSIX_GUARD(s2n_free(&chain_and_key->sct_list));
if (data && length) {
- GUARD(s2n_alloc(&chain_and_key->sct_list, length));
- memcpy_check(chain_and_key->sct_list.data, data, length);
+ POSIX_GUARD(s2n_alloc(&chain_and_key->sct_list, length));
+ POSIX_CHECKED_MEMCPY(chain_and_key->sct_list.data, data, length);
}
return 0;
}
@@ -155,7 +188,7 @@ struct s2n_cert_chain_and_key *s2n_cert_chain_and_key_new(void)
struct s2n_cert_chain_and_key *chain_and_key;
struct s2n_blob chain_and_key_mem, cert_chain_mem, pkey_mem;
- GUARD_PTR(s2n_alloc(&chain_and_key_mem, sizeof(struct s2n_cert_chain_and_key)));
+ PTR_GUARD_POSIX(s2n_alloc(&chain_and_key_mem, sizeof(struct s2n_cert_chain_and_key)));
chain_and_key = (struct s2n_cert_chain_and_key *)(void *)chain_and_key_mem.data;
/* Allocate the memory for the chain and key */
@@ -199,7 +232,7 @@ DEFINE_POINTER_CLEANUP_FUNC(GENERAL_NAMES *, GENERAL_NAMES_free);
int s2n_cert_chain_and_key_load_sans(struct s2n_cert_chain_and_key *chain_and_key, X509 *x509_cert)
{
- notnull_check(chain_and_key->san_names);
+ POSIX_ENSURE_REF(chain_and_key->san_names);
DEFER_CLEANUP(GENERAL_NAMES *san_names = X509_get_ext_d2i(x509_cert, NID_subject_alt_name, NULL, NULL), GENERAL_NAMES_free_pointer);
if (san_names == NULL) {
@@ -219,19 +252,19 @@ int s2n_cert_chain_and_key_load_sans(struct s2n_cert_chain_and_key *chain_and_ke
unsigned char *san_str = san_name->d.dNSName->data;
const size_t san_str_len = san_name->d.dNSName->length;
struct s2n_blob *san_blob = NULL;
- GUARD_AS_POSIX(s2n_array_pushback(chain_and_key->san_names, (void **)&san_blob));
+ POSIX_GUARD_RESULT(s2n_array_pushback(chain_and_key->san_names, (void **)&san_blob));
if (!san_blob) {
- S2N_ERROR(S2N_ERR_NULL_SANS);
+ POSIX_BAIL(S2N_ERR_NULL_SANS);
}
if (s2n_alloc(san_blob, san_str_len)) {
S2N_ERROR_PRESERVE_ERRNO();
}
- memcpy_check(san_blob->data, san_str, san_str_len);
+ POSIX_CHECKED_MEMCPY(san_blob->data, san_str, san_str_len);
san_blob->size = san_str_len;
/* normalize san_blob to lowercase */
- GUARD(s2n_blob_char_to_lower(san_blob));
+ POSIX_GUARD(s2n_blob_char_to_lower(san_blob));
}
}
@@ -251,7 +284,7 @@ DEFINE_POINTER_CLEANUP_FUNC(unsigned char *, OPENSSL_free);
int s2n_cert_chain_and_key_load_cns(struct s2n_cert_chain_and_key *chain_and_key, X509 *x509_cert)
{
- notnull_check(chain_and_key->cn_names);
+ POSIX_ENSURE_REF(chain_and_key->cn_names);
X509_NAME *subject = X509_get_subject_name(x509_cert);
if (!subject) {
@@ -284,18 +317,18 @@ int s2n_cert_chain_and_key_load_cns(struct s2n_cert_chain_and_key *chain_and_key
OPENSSL_free(utf8_str);
} else {
struct s2n_blob *cn_name = NULL;
- GUARD_AS_POSIX(s2n_array_pushback(chain_and_key->cn_names, (void **)&cn_name));
+ POSIX_GUARD_RESULT(s2n_array_pushback(chain_and_key->cn_names, (void **)&cn_name));
if (cn_name == NULL) {
- S2N_ERROR(S2N_ERR_NULL_CN_NAME);
+ POSIX_BAIL(S2N_ERR_NULL_CN_NAME);
}
if (s2n_alloc(cn_name, utf8_out_len) < 0) {
S2N_ERROR_PRESERVE_ERRNO();
}
- memcpy_check(cn_name->data, utf8_str, utf8_out_len);
+ POSIX_CHECKED_MEMCPY(cn_name->data, utf8_str, utf8_out_len);
cn_name->size = utf8_out_len;
/* normalize cn_name to lowercase */
- GUARD(s2n_blob_char_to_lower(cn_name));
+ POSIX_GUARD(s2n_blob_char_to_lower(cn_name));
}
}
@@ -307,41 +340,92 @@ static int s2n_cert_chain_and_key_set_names(struct s2n_cert_chain_and_key *chain
const unsigned char *leaf_der = leaf_bytes->data;
X509 *cert = d2i_X509(NULL, &leaf_der, leaf_bytes->size);
if (!cert) {
- S2N_ERROR(S2N_ERR_INVALID_PEM);
+ POSIX_BAIL(S2N_ERR_INVALID_PEM);
}
- GUARD(s2n_cert_chain_and_key_load_sans(chain_and_key, cert));
+ POSIX_GUARD(s2n_cert_chain_and_key_load_sans(chain_and_key, cert));
/* For current use cases, we *could* avoid populating the common names if any sans were loaded in
* s2n_cert_chain_and_key_load_sans. Let's unconditionally populate this field to avoid surprises
* in the future.
*/
- GUARD(s2n_cert_chain_and_key_load_cns(chain_and_key, cert));
+ POSIX_GUARD(s2n_cert_chain_and_key_load_cns(chain_and_key, cert));
X509_free(cert);
return 0;
}
-int s2n_cert_chain_and_key_load_pem(struct s2n_cert_chain_and_key *chain_and_key, const char *chain_pem, const char *private_key_pem)
+int s2n_cert_chain_and_key_load(struct s2n_cert_chain_and_key *chain_and_key)
{
- notnull_check(chain_and_key);
-
- GUARD(s2n_cert_chain_and_key_set_cert_chain(chain_and_key, chain_pem));
- GUARD(s2n_cert_chain_and_key_set_private_key(chain_and_key, private_key_pem));
+ POSIX_ENSURE_REF(chain_and_key);
+ POSIX_ENSURE_REF(chain_and_key->cert_chain);
+ POSIX_ENSURE_REF(chain_and_key->cert_chain->head);
+ POSIX_ENSURE_REF(chain_and_key->private_key);
+ struct s2n_cert *head = chain_and_key->cert_chain->head;
/* Parse the leaf cert for the public key and certificate type */
DEFER_CLEANUP(struct s2n_pkey public_key = {0}, s2n_pkey_free);
s2n_pkey_type pkey_type = S2N_PKEY_TYPE_UNKNOWN;
- GUARD(s2n_asn1der_to_public_key_and_type(&public_key, &pkey_type, &chain_and_key->cert_chain->head->raw));
- S2N_ERROR_IF(pkey_type == S2N_PKEY_TYPE_UNKNOWN, S2N_ERR_CERT_TYPE_UNSUPPORTED);
- GUARD(s2n_cert_set_cert_type(chain_and_key->cert_chain->head, pkey_type));
+ POSIX_GUARD(s2n_asn1der_to_public_key_and_type(&public_key, &pkey_type, &head->raw));
+ POSIX_ENSURE(pkey_type != S2N_PKEY_TYPE_UNKNOWN, S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ POSIX_GUARD(s2n_cert_set_cert_type(head, pkey_type));
/* Validate the leaf cert's public key matches the provided private key */
- GUARD(s2n_pkey_match(&public_key, chain_and_key->private_key));
+ if (s2n_pkey_check_key_exists(chain_and_key->private_key) == S2N_SUCCESS) {
+ POSIX_GUARD(s2n_pkey_match(&public_key, chain_and_key->private_key));
+ }
/* Populate name information from the SAN/CN for the leaf certificate */
- GUARD(s2n_cert_chain_and_key_set_names(chain_and_key, &chain_and_key->cert_chain->head->raw));
+ POSIX_GUARD(s2n_cert_chain_and_key_set_names(chain_and_key, &head->raw));
+
+ /* Populate ec curve libcrypto nid */
+ if (pkey_type == S2N_PKEY_TYPE_ECDSA) {
+ int nid = EC_GROUP_get_curve_name(EC_KEY_get0_group(public_key.key.ecdsa_key.ec_key));
+ POSIX_ENSURE(nid > 0, S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ POSIX_ENSURE(nid < UINT16_MAX, S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ head->ec_curve_nid = nid;
+ }
- return 0;
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_chain_and_key_load_pem(struct s2n_cert_chain_and_key *chain_and_key, const char *chain_pem, const char *private_key_pem)
+{
+ POSIX_ENSURE_REF(chain_and_key);
+
+ POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain(chain_and_key, chain_pem));
+ POSIX_GUARD(s2n_cert_chain_and_key_set_private_key(chain_and_key, private_key_pem));
+
+ POSIX_GUARD(s2n_cert_chain_and_key_load(chain_and_key));
+
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_chain_and_key_load_public_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len)
+{
+ POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain_bytes(chain_and_key, chain_pem, chain_pem_len));
+ POSIX_GUARD(s2n_cert_chain_and_key_load(chain_and_key));
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_chain_and_key_load_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem,
+ uint32_t chain_pem_len, uint8_t *private_key_pem, uint32_t private_key_pem_len)
+{
+ POSIX_ENSURE_REF(chain_and_key);
+
+ POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain_bytes(chain_and_key, chain_pem, chain_pem_len));
+ POSIX_GUARD(s2n_cert_chain_and_key_set_private_key_bytes(chain_and_key, private_key_pem, private_key_pem_len));
+
+ POSIX_GUARD(s2n_cert_chain_and_key_load(chain_and_key));
+
+ return S2N_SUCCESS;
+}
+
+S2N_CLEANUP_RESULT s2n_cert_chain_and_key_ptr_free(struct s2n_cert_chain_and_key **cert_and_key)
+{
+ RESULT_ENSURE_REF(cert_and_key);
+ RESULT_GUARD_POSIX(s2n_cert_chain_and_key_free(*cert_and_key));
+ *cert_and_key = NULL;
+ return S2N_RESULT_OK;
}
int s2n_cert_chain_and_key_free(struct s2n_cert_chain_and_key *cert_and_key)
@@ -355,72 +439,91 @@ int s2n_cert_chain_and_key_free(struct s2n_cert_chain_and_key *cert_and_key)
struct s2n_cert *node = cert_and_key->cert_chain->head;
while (node) {
/* Free the cert */
- GUARD(s2n_free(&node->raw));
+ POSIX_GUARD(s2n_free(&node->raw));
/* update head so it won't point to freed memory */
cert_and_key->cert_chain->head = node->next;
/* Free the node */
- GUARD(s2n_free_object((uint8_t **)&node, sizeof(struct s2n_cert)));
+ POSIX_GUARD(s2n_free_object((uint8_t **)&node, sizeof(struct s2n_cert)));
node = cert_and_key->cert_chain->head;
}
- GUARD(s2n_free_object((uint8_t **)&cert_and_key->cert_chain, sizeof(struct s2n_cert_chain)));
+ POSIX_GUARD(s2n_free_object((uint8_t **)&cert_and_key->cert_chain, sizeof(struct s2n_cert_chain)));
}
if (cert_and_key->private_key) {
- GUARD(s2n_pkey_free(cert_and_key->private_key));
- GUARD(s2n_free_object((uint8_t **)&cert_and_key->private_key, sizeof(s2n_cert_private_key)));
+ POSIX_GUARD(s2n_pkey_free(cert_and_key->private_key));
+ POSIX_GUARD(s2n_free_object((uint8_t **)&cert_and_key->private_key, sizeof(s2n_cert_private_key)));
}
uint32_t len = 0;
if (cert_and_key->san_names) {
- GUARD_AS_POSIX(s2n_array_num_elements(cert_and_key->san_names, &len));
+ POSIX_GUARD_RESULT(s2n_array_num_elements(cert_and_key->san_names, &len));
for (uint32_t i = 0; i < len; i++) {
struct s2n_blob *san_name = NULL;
- GUARD_AS_POSIX(s2n_array_get(cert_and_key->san_names, i, (void **)&san_name));
- GUARD(s2n_free(san_name));
+ POSIX_GUARD_RESULT(s2n_array_get(cert_and_key->san_names, i, (void **)&san_name));
+ POSIX_GUARD(s2n_free(san_name));
}
- GUARD_AS_POSIX(s2n_array_free(cert_and_key->san_names));
+ POSIX_GUARD_RESULT(s2n_array_free(cert_and_key->san_names));
cert_and_key->san_names = NULL;
}
if (cert_and_key->cn_names) {
- GUARD_AS_POSIX(s2n_array_num_elements(cert_and_key->cn_names, &len));
+ POSIX_GUARD_RESULT(s2n_array_num_elements(cert_and_key->cn_names, &len));
for (uint32_t i = 0; i < len; i++) {
struct s2n_blob *cn_name = NULL;
- GUARD_AS_POSIX(s2n_array_get(cert_and_key->cn_names, i, (void **)&cn_name));
- GUARD(s2n_free(cn_name));
+ POSIX_GUARD_RESULT(s2n_array_get(cert_and_key->cn_names, i, (void **)&cn_name));
+ POSIX_GUARD(s2n_free(cn_name));
}
- GUARD_AS_POSIX(s2n_array_free(cert_and_key->cn_names));
+ POSIX_GUARD_RESULT(s2n_array_free(cert_and_key->cn_names));
cert_and_key->cn_names = NULL;
}
- GUARD(s2n_free(&cert_and_key->ocsp_status));
- GUARD(s2n_free(&cert_and_key->sct_list));
+ POSIX_GUARD(s2n_free(&cert_and_key->ocsp_status));
+ POSIX_GUARD(s2n_free(&cert_and_key->sct_list));
- GUARD(s2n_free_object((uint8_t **)&cert_and_key, sizeof(struct s2n_cert_chain_and_key)));
+ POSIX_GUARD(s2n_free_object((uint8_t **)&cert_and_key, sizeof(struct s2n_cert_chain_and_key)));
return 0;
}
+int s2n_cert_chain_free(struct s2n_cert_chain *cert_chain)
+{
+ /* Walk the chain and free the certs/nodes allocated prior to failure */
+ if (cert_chain) {
+ struct s2n_cert *node = cert_chain->head;
+ while (node) {
+ /* Free the cert */
+ POSIX_GUARD(s2n_free(&node->raw));
+ /* update head so it won't point to freed memory */
+ cert_chain->head = node->next;
+ /* Free the node */
+ POSIX_GUARD(s2n_free_object((uint8_t **)&node, sizeof(struct s2n_cert)));
+ node = cert_chain->head;
+ }
+ }
+
+ return S2N_SUCCESS;
+}
+
int s2n_send_cert_chain(struct s2n_connection *conn, struct s2n_stuffer *out, struct s2n_cert_chain_and_key *chain_and_key)
{
- notnull_check(conn);
- notnull_check(out);
- notnull_check(chain_and_key);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(chain_and_key);
struct s2n_cert_chain *chain = chain_and_key->cert_chain;
- notnull_check(chain);
+ POSIX_ENSURE_REF(chain);
struct s2n_cert *cur_cert = chain->head;
- notnull_check(cur_cert);
+ POSIX_ENSURE_REF(cur_cert);
struct s2n_stuffer_reservation cert_chain_size = {0};
- GUARD(s2n_stuffer_reserve_uint24(out, &cert_chain_size));
+ POSIX_GUARD(s2n_stuffer_reserve_uint24(out, &cert_chain_size));
/* Send certs and extensions (in TLS 1.3) */
bool first_entry = true;
while (cur_cert) {
- notnull_check(cur_cert);
- GUARD(s2n_stuffer_write_uint24(out, cur_cert->raw.size));
- GUARD(s2n_stuffer_write_bytes(out, cur_cert->raw.data, cur_cert->raw.size));
+ POSIX_ENSURE_REF(cur_cert);
+ POSIX_GUARD(s2n_stuffer_write_uint24(out, cur_cert->raw.size));
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, cur_cert->raw.data, cur_cert->raw.size));
/* According to https://tools.ietf.org/html/rfc8446#section-4.4.2,
* If an extension applies to the entire chain, it SHOULD be included in
@@ -429,39 +532,39 @@ int s2n_send_cert_chain(struct s2n_connection *conn, struct s2n_stuffer *out, st
* entries, only the first matter to use here */
if (conn->actual_protocol_version >= S2N_TLS13) {
if (first_entry) {
- GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_CERTIFICATE, conn, out));
+ POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_CERTIFICATE, conn, out));
first_entry = false;
} else {
- GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_EMPTY, conn, out));
+ POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_EMPTY, conn, out));
}
}
cur_cert = cur_cert->next;
}
- GUARD(s2n_stuffer_write_vector_size(&cert_chain_size));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&cert_chain_size));
return 0;
}
int s2n_send_empty_cert_chain(struct s2n_stuffer *out)
{
- notnull_check(out);
- GUARD(s2n_stuffer_write_uint24(out, 0));
+ POSIX_ENSURE_REF(out);
+ POSIX_GUARD(s2n_stuffer_write_uint24(out, 0));
return 0;
}
static int s2n_does_cert_san_match_hostname(const struct s2n_cert_chain_and_key *chain_and_key, const struct s2n_blob *dns_name)
{
- notnull_check(chain_and_key);
- notnull_check(dns_name);
+ POSIX_ENSURE_REF(chain_and_key);
+ POSIX_ENSURE_REF(dns_name);
struct s2n_array *san_names = chain_and_key->san_names;
uint32_t len = 0;
- GUARD_AS_POSIX(s2n_array_num_elements(san_names, &len));
+ POSIX_GUARD_RESULT(s2n_array_num_elements(san_names, &len));
for (uint32_t i = 0; i < len; i++) {
struct s2n_blob *san_name = NULL;
- GUARD_AS_POSIX(s2n_array_get(san_names, i, (void **)&san_name));
- notnull_check(san_name);
+ POSIX_GUARD_RESULT(s2n_array_get(san_names, i, (void **)&san_name));
+ POSIX_ENSURE_REF(san_name);
if ((dns_name->size == san_name->size) && (strncasecmp((const char *) dns_name->data, (const char *) san_name->data, dns_name->size) == 0)) {
return 1;
}
@@ -472,16 +575,16 @@ static int s2n_does_cert_san_match_hostname(const struct s2n_cert_chain_and_key
static int s2n_does_cert_cn_match_hostname(const struct s2n_cert_chain_and_key *chain_and_key, const struct s2n_blob *dns_name)
{
- notnull_check(chain_and_key);
- notnull_check(dns_name);
+ POSIX_ENSURE_REF(chain_and_key);
+ POSIX_ENSURE_REF(dns_name);
struct s2n_array *cn_names = chain_and_key->cn_names;
uint32_t len = 0;
- GUARD_AS_POSIX(s2n_array_num_elements(cn_names, &len));
+ POSIX_GUARD_RESULT(s2n_array_num_elements(cn_names, &len));
for (uint32_t i = 0; i < len; i++) {
struct s2n_blob *cn_name = NULL;
- GUARD_AS_POSIX(s2n_array_get(cn_names, i, (void **)&cn_name));
- notnull_check(cn_name);
+ POSIX_GUARD_RESULT(s2n_array_get(cn_names, i, (void **)&cn_name));
+ POSIX_ENSURE_REF(cn_name);
if ((dns_name->size == cn_name->size) && (strncasecmp((const char *) dns_name->data, (const char *) cn_name->data, dns_name->size) == 0)) {
return 1;
}
@@ -493,7 +596,7 @@ static int s2n_does_cert_cn_match_hostname(const struct s2n_cert_chain_and_key *
int s2n_cert_chain_and_key_matches_dns_name(const struct s2n_cert_chain_and_key *chain_and_key, const struct s2n_blob *dns_name)
{
uint32_t len = 0;
- GUARD_AS_POSIX(s2n_array_num_elements(chain_and_key->san_names, &len));
+ POSIX_GUARD_RESULT(s2n_array_num_elements(chain_and_key->san_names, &len));
if (len > 0) {
if (s2n_does_cert_san_match_hostname(chain_and_key, dns_name)) {
return 1;
@@ -524,11 +627,259 @@ void *s2n_cert_chain_and_key_get_ctx(struct s2n_cert_chain_and_key *cert_and_key
s2n_pkey_type s2n_cert_chain_and_key_get_pkey_type(struct s2n_cert_chain_and_key *chain_and_key)
{
+ if (chain_and_key == NULL
+ || chain_and_key->cert_chain == NULL
+ || chain_and_key->cert_chain->head == NULL) {
+ return S2N_PKEY_TYPE_UNKNOWN;
+ }
return chain_and_key->cert_chain->head->pkey_type;
}
s2n_cert_private_key *s2n_cert_chain_and_key_get_private_key(struct s2n_cert_chain_and_key *chain_and_key)
{
- ENSURE_REF_PTR(chain_and_key);
+ PTR_ENSURE_REF(chain_and_key);
return chain_and_key->private_key;
}
+
+int s2n_cert_chain_get_length(const struct s2n_cert_chain_and_key *chain_and_key, uint32_t *cert_length)
+{
+ POSIX_ENSURE_REF(chain_and_key);
+ POSIX_ENSURE_REF(cert_length);
+
+ struct s2n_cert *head_cert = chain_and_key->cert_chain->head;
+ POSIX_ENSURE_REF(head_cert);
+ *cert_length = 1;
+ struct s2n_cert *next_cert = head_cert->next;
+ while (next_cert != NULL) {
+ *cert_length += 1;
+ next_cert = next_cert->next;
+ }
+
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_chain_get_cert(const struct s2n_cert_chain_and_key *chain_and_key, struct s2n_cert **out_cert,
+ const uint32_t cert_idx)
+{
+ POSIX_ENSURE_REF(chain_and_key);
+ POSIX_ENSURE_REF(out_cert);
+
+ struct s2n_cert *cur_cert = chain_and_key->cert_chain->head;
+ POSIX_ENSURE_REF(cur_cert);
+ uint32_t counter = 0;
+
+ struct s2n_cert *next_cert = cur_cert->next;
+
+ while ((next_cert != NULL) && (counter < cert_idx)) {
+ cur_cert = next_cert;
+ next_cert = next_cert->next;
+ counter++;
+ }
+
+ POSIX_ENSURE(counter == cert_idx, S2N_ERR_NO_CERT_FOUND);
+ POSIX_ENSURE(cur_cert != NULL, S2N_ERR_NO_CERT_FOUND);
+ *out_cert = cur_cert;
+
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_get_der(const struct s2n_cert *cert, const uint8_t **out_cert_der, uint32_t *cert_length)
+{
+ POSIX_ENSURE_REF(cert);
+ POSIX_ENSURE_REF(out_cert_der);
+ POSIX_ENSURE_REF(cert_length);
+
+ *cert_length = cert->raw.size;
+ *out_cert_der = cert->raw.data;
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_asn1_obj_free(ASN1_OBJECT ** data)
+{
+ if (*data != NULL) {
+ ASN1_OBJECT_free(*data);
+ }
+ return S2N_SUCCESS;
+}
+
+static int s2n_asn1_string_free(ASN1_STRING** data)
+{
+ if (*data != NULL) {
+ ASN1_STRING_free(*data);
+ }
+ return S2N_SUCCESS;
+}
+
+static int s2n_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t extension_len, uint8_t *out_data, uint32_t *out_len)
+{
+ DEFER_CLEANUP(ASN1_STRING *asn1_str = NULL, s2n_asn1_string_free);
+ /* Note that d2i_ASN1_UTF8STRING increments *der_in to the byte following the parsed data.
+ * Using a temporary variable is mandatory to prevent memory free-ing errors.
+ * Ref to the warning section here for more information:
+ * https://www.openssl.org/docs/man1.1.0/man3/d2i_ASN1_UTF8STRING.html.
+ */
+ const uint8_t *asn1_str_data = extension_data;
+ asn1_str = d2i_ASN1_UTF8STRING(NULL, (const unsigned char **)(void *)&asn1_str_data, extension_len);
+ POSIX_ENSURE(asn1_str != NULL, S2N_ERR_INVALID_X509_EXTENSION_TYPE);
+ /* ASN1_STRING_type() returns the type of `asn1_str`, using standard constants such as V_ASN1_OCTET_STRING.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/ASN1_STRING_type.html.
+ */
+ int type = ASN1_STRING_type(asn1_str);
+ POSIX_ENSURE(type == V_ASN1_UTF8STRING, S2N_ERR_INVALID_X509_EXTENSION_TYPE);
+
+ int len = ASN1_STRING_length(asn1_str);
+ if (out_data != NULL) {
+ POSIX_ENSURE(*out_len >= len, S2N_ERR_INSUFFICIENT_MEM_SIZE);
+ /* ASN1_STRING_data() returns an internal pointer to the data.
+ * Since this is an internal pointer it should not be freed or modified in any way.
+ * Ref: https://www.openssl.org/docs/man1.0.2/man3/ASN1_STRING_data.html.
+ */
+ unsigned char *internal_data = ASN1_STRING_data(asn1_str);
+ POSIX_ENSURE_REF(internal_data);
+ POSIX_CHECKED_MEMCPY(out_data, internal_data, len);
+ }
+ *out_len = len;
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_get_utf8_string_from_extension_data_length(const uint8_t *extension_data, uint32_t extension_len, uint32_t *utf8_str_len)
+{
+ POSIX_ENSURE_REF(extension_data);
+ POSIX_ENSURE_GT(extension_len, 0);
+ POSIX_ENSURE_REF(utf8_str_len);
+
+ POSIX_GUARD(s2n_utf8_string_from_extension_data(extension_data, extension_len, NULL, utf8_str_len));
+
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_get_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t extension_len, uint8_t *out_data, uint32_t *out_len)
+{
+ POSIX_ENSURE_REF(extension_data);
+ POSIX_ENSURE_GT(extension_len, 0);
+ POSIX_ENSURE_REF(out_data);
+ POSIX_ENSURE_REF(out_len);
+
+ POSIX_GUARD(s2n_utf8_string_from_extension_data(extension_data, extension_len, out_data, out_len));
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_parse_x509_extension(struct s2n_cert *cert, const uint8_t *oid,
+ uint8_t *ext_value, uint32_t *ext_value_len, bool *critical)
+{
+ POSIX_ENSURE_REF(cert->raw.data);
+ /* Obtain the openssl x509 cert from the ASN1 DER certificate input.
+ * Note that d2i_X509 increments *der_in to the byte following the parsed data.
+ * Using a temporary variable is mandatory to prevent memory free-ing errors.
+ * Ref to the warning section here for more information:
+ * https://www.openssl.org/docs/man1.1.0/man3/d2i_X509.html.
+ */
+ uint8_t *der_in = cert->raw.data;
+ DEFER_CLEANUP(X509 *x509_cert = d2i_X509(NULL, (const unsigned char **)(void *)&der_in, cert->raw.size),
+ X509_free_pointer);
+ POSIX_ENSURE_REF(x509_cert);
+
+ /* Retrieve the number of x509 extensions present in the certificate
+ * X509_get_ext_count returns the number of extensions in the x509 certificate.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_get_ext_count.html.
+ */
+ int ext_count = X509_get_ext_count(x509_cert);
+ POSIX_ENSURE_GT(ext_count, 0);
+
+ /* OBJ_txt2obj() converts the input text string into an ASN1_OBJECT structure.
+ * If no_name is 0 then long names and short names will be interpreted as well as numerical forms.
+ * If no_name is 1 only the numerical form is acceptable.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/OBJ_txt2obj.html.
+ */
+ DEFER_CLEANUP(ASN1_OBJECT *asn1_obj_in = OBJ_txt2obj((const char *)oid, 0), s2n_asn1_obj_free);
+ POSIX_ENSURE_REF(asn1_obj_in);
+
+ for (size_t loc = 0; loc < ext_count; loc++) {
+ ASN1_OCTET_STRING *asn1_str = NULL;
+ bool match_found = false;
+
+ /* Retrieve the x509 extension at location loc.
+ * X509_get_ext() retrieves extension loc from x.
+ * The index loc can take any value from 0 to X509_get_ext_count(x) - 1.
+ * The returned extension is an internal pointer which must not be freed up by the application.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_get_ext.html.
+ */
+ X509_EXTENSION *x509_ext = X509_get_ext(x509_cert, loc);
+ POSIX_ENSURE_REF(x509_ext);
+
+ /* Retrieve the extension object/OID/extnId.
+ * X509_EXTENSION_get_object() returns the extension type of `x509_ext` as an ASN1_OBJECT pointer.
+ * The returned pointer is an internal value which must not be freed up.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_EXTENSION_get_object.html.
+ */
+ ASN1_OBJECT *asn1_obj = X509_EXTENSION_get_object(x509_ext);
+ POSIX_ENSURE_REF(asn1_obj);
+
+ /* OBJ_cmp() compares two ASN1_OBJECT objects. If the two are identical 0 is returned.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/OBJ_cmp.html.
+ */
+ match_found = (0 == OBJ_cmp(asn1_obj_in, asn1_obj));
+
+ /* If match found, retrieve the corresponding OID value for the x509 extension */
+ if (match_found) {
+ /* X509_EXTENSION_get_data() returns the data of extension `x509_ext`.
+ * The returned pointer is an internal value which must not be freed up.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_EXTENSION_get_data.html.
+ */
+ asn1_str = X509_EXTENSION_get_data(x509_ext);
+ /* ASN1_STRING_length() returns the length of the content of `asn1_str`.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/ASN1_STRING_length.html.
+ */
+ int len = ASN1_STRING_length(asn1_str);
+ if (ext_value != NULL) {
+ POSIX_ENSURE(*ext_value_len >= len, S2N_ERR_INSUFFICIENT_MEM_SIZE);
+ /* ASN1_STRING_data() returns an internal pointer to the data.
+ * Since this is an internal pointer it should not be freed or modified in any way.
+ * Ref: https://www.openssl.org/docs/man1.0.2/man3/ASN1_STRING_data.html.
+ */
+ unsigned char *internal_data = ASN1_STRING_data(asn1_str);
+ POSIX_ENSURE_REF(internal_data);
+ POSIX_CHECKED_MEMCPY(ext_value, internal_data, len);
+ }
+ if (critical != NULL) {
+ /* Retrieve the x509 extension's critical value.
+ * X509_EXTENSION_get_critical() returns the criticality of extension `x509_ext`,
+ * it returns 1 for critical and 0 for non-critical.
+ * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_EXTENSION_get_critical.html.
+ */
+ *critical = X509_EXTENSION_get_critical(x509_ext);
+ }
+ *ext_value_len = len;
+ return S2N_SUCCESS;
+ }
+ }
+
+ POSIX_BAIL(S2N_ERR_X509_EXTENSION_VALUE_NOT_FOUND);
+}
+
+int s2n_cert_get_x509_extension_value_length(struct s2n_cert *cert, const uint8_t *oid, uint32_t *ext_value_len)
+{
+ POSIX_ENSURE_REF(cert);
+ POSIX_ENSURE_REF(oid);
+ POSIX_ENSURE_REF(ext_value_len);
+
+ POSIX_GUARD(s2n_parse_x509_extension(cert, oid, NULL, ext_value_len, NULL));
+
+ return S2N_SUCCESS;
+}
+
+int s2n_cert_get_x509_extension_value(struct s2n_cert *cert, const uint8_t *oid,
+ uint8_t *ext_value, uint32_t *ext_value_len, bool *critical)
+{
+ POSIX_ENSURE_REF(cert);
+ POSIX_ENSURE_REF(oid);
+ POSIX_ENSURE_REF(ext_value);
+ POSIX_ENSURE_REF(ext_value_len);
+ POSIX_ENSURE_REF(critical);
+
+ POSIX_GUARD(s2n_parse_x509_extension(cert, oid, ext_value, ext_value_len, critical));
+
+ return S2N_SUCCESS;
+}
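
Besides the macro migration, this file gains DER and X.509 extension accessors (s2n_cert_get_der, s2n_parse_x509_extension and the public wrappers around it). A hedged usage sketch of the two-step length-then-value lookup; the OID string, buffer management and error handling here are illustrative assumptions, not s2n code:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Sketch of the new two-step extension lookup: query the length, size a
 * buffer, then fetch the value and its criticality. The OID below is a
 * made-up private arc used purely for illustration. */
static int example_read_extension(struct s2n_cert *cert)
{
    const uint8_t *oid = (const uint8_t *) "1.3.6.1.4.1.99999.1";
    uint32_t len = 0;
    if (s2n_cert_get_x509_extension_value_length(cert, oid, &len) != S2N_SUCCESS) {
        return -1;
    }

    uint8_t *value = malloc(len);
    if (value == NULL) {
        return -1;
    }

    bool critical = false;
    int rc = s2n_cert_get_x509_extension_value(cert, oid, value, &len, &critical);
    /* on success, value[0..len) holds the extension payload */
    free(value);
    return rc;
}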
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_certificate.h b/contrib/restricted/aws/s2n/crypto/s2n_certificate.h
index 2395595641..e78601c1ae 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_certificate.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_certificate.h
@@ -19,7 +19,7 @@
#include <openssl/x509.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "crypto/s2n_pkey.h"
#include "stuffer/s2n_stuffer.h"
@@ -27,6 +27,7 @@
struct s2n_cert {
s2n_pkey_type pkey_type;
+ uint16_t ec_curve_nid;
s2n_cert_public_key public_key;
struct s2n_blob raw;
struct s2n_cert *next;
@@ -66,10 +67,21 @@ int s2n_cert_chain_and_key_load_cns(struct s2n_cert_chain_and_key *chain_and_key
int s2n_cert_chain_and_key_load_sans(struct s2n_cert_chain_and_key *chain_and_key, X509 *x509_cert);
int s2n_cert_chain_and_key_matches_dns_name(const struct s2n_cert_chain_and_key *chain_and_key, const struct s2n_blob *dns_name);
+S2N_CLEANUP_RESULT s2n_cert_chain_and_key_ptr_free(struct s2n_cert_chain_and_key **cert_and_key);
int s2n_cert_set_cert_type(struct s2n_cert *cert, s2n_pkey_type pkey_type);
int s2n_send_cert_chain(struct s2n_connection *conn, struct s2n_stuffer *out, struct s2n_cert_chain_and_key *chain_and_key);
int s2n_send_empty_cert_chain(struct s2n_stuffer *out);
int s2n_create_cert_chain_from_stuffer(struct s2n_cert_chain *cert_chain_out, struct s2n_stuffer *chain_in_stuffer);
+int s2n_cert_chain_and_key_set_cert_chain_bytes(struct s2n_cert_chain_and_key *cert_and_key, uint8_t *cert_chain_pem, uint32_t cert_chain_len);
+int s2n_cert_chain_and_key_set_private_key_bytes(struct s2n_cert_chain_and_key *cert_and_key, uint8_t *private_key_pem, uint32_t private_key_len);
int s2n_cert_chain_and_key_set_cert_chain(struct s2n_cert_chain_and_key *cert_and_key, const char *cert_chain_pem);
int s2n_cert_chain_and_key_set_private_key(struct s2n_cert_chain_and_key *cert_and_key, const char *private_key_pem);
s2n_pkey_type s2n_cert_chain_and_key_get_pkey_type(struct s2n_cert_chain_and_key *chain_and_key);
+int s2n_cert_chain_get_length(const struct s2n_cert_chain_and_key *chain_and_key, uint32_t *cert_length);
+int s2n_cert_chain_get_cert(const struct s2n_cert_chain_and_key *chain_and_key, struct s2n_cert **out_cert, const uint32_t cert_idx);
+int s2n_cert_get_der(const struct s2n_cert *cert, const uint8_t **out_cert_der, uint32_t *cert_length);
+int s2n_cert_chain_free(struct s2n_cert_chain *cert_chain);
+int s2n_cert_get_x509_extension_value_length(struct s2n_cert *cert, const uint8_t *oid, uint32_t *ext_value_len);
+int s2n_cert_get_x509_extension_value(struct s2n_cert *cert, const uint8_t *oid, uint8_t *ext_value, uint32_t *ext_value_len, bool *critical);
+int s2n_cert_get_utf8_string_from_extension_data_length(const uint8_t *extension_data, uint32_t extension_len, uint32_t *utf8_str_len);
+int s2n_cert_get_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t extension_len, uint8_t *out_data, uint32_t *out_len);
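
The header now also declares chain-walking helpers. A small sketch of iterating a loaded chain with them; it assumes the chain was already populated, for example via s2n_cert_chain_and_key_load_pem():

#include <stdint.h>

/* Sketch: walk a loaded chain with the accessors declared above. */
static int example_dump_chain(const struct s2n_cert_chain_and_key *chain)
{
    uint32_t count = 0;
    if (s2n_cert_chain_get_length(chain, &count) != S2N_SUCCESS) {
        return -1;
    }

    for (uint32_t i = 0; i < count; i++) {
        struct s2n_cert *cert = NULL;
        const uint8_t *der = NULL;
        uint32_t der_len = 0;

        if (s2n_cert_chain_get_cert(chain, &cert, i) != S2N_SUCCESS) {
            return -1;
        }
        if (s2n_cert_get_der(cert, &der, &der_len) != S2N_SUCCESS) {
            return -1;
        }
        /* der/der_len point into the chain's internal storage; do not free them. */
    }
    return 0;
}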
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_cipher.c b/contrib/restricted/aws/s2n/crypto/s2n_cipher.c
index 8a83980518..c01e44d8da 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_cipher.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_cipher.c
@@ -24,10 +24,10 @@
int s2n_session_key_alloc(struct s2n_session_key *key)
{
- eq_check(key->evp_cipher_ctx, NULL);
- notnull_check(key->evp_cipher_ctx = EVP_CIPHER_CTX_new());
+ POSIX_ENSURE_EQ(key->evp_cipher_ctx, NULL);
+ POSIX_ENSURE_REF(key->evp_cipher_ctx = EVP_CIPHER_CTX_new());
#if defined(S2N_CIPHER_AEAD_API_AVAILABLE)
- eq_check(key->evp_aead_ctx, NULL);
+ POSIX_ENSURE_EQ(key->evp_aead_ctx, NULL);
key->evp_aead_ctx = OPENSSL_malloc(sizeof(EVP_AEAD_CTX));
if (key->evp_aead_ctx == NULL) {
EVP_CIPHER_CTX_free(key->evp_cipher_ctx);
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_composite_cipher_aes_sha.c b/contrib/restricted/aws/s2n/crypto/s2n_composite_cipher_aes_sha.c
index 60eee634b3..3be8de854f 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_composite_cipher_aes_sha.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_composite_cipher_aes_sha.c
@@ -130,18 +130,18 @@ static int s2n_composite_cipher_aes_sha_initial_hmac(struct s2n_session_key *key
* will fail. Instead of defining a possibly dangerous default or hard coding this to 0x16 error out with BoringSSL and AWS-LC.
*/
#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
- S2N_ERROR(S2N_ERR_NO_SUPPORTED_LIBCRYPTO_API);
+ POSIX_BAIL(S2N_ERR_NO_SUPPORTED_LIBCRYPTO_API);
#else
uint8_t ctrl_buf[S2N_TLS12_AAD_LEN];
struct s2n_blob ctrl_blob = { .data = ctrl_buf, .size = S2N_TLS12_AAD_LEN };
struct s2n_stuffer ctrl_stuffer = {0};
- GUARD(s2n_stuffer_init(&ctrl_stuffer, &ctrl_blob));
+ POSIX_GUARD(s2n_stuffer_init(&ctrl_stuffer, &ctrl_blob));
- GUARD(s2n_stuffer_write_bytes(&ctrl_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
- GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, content_type));
- GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, protocol_version / 10));
- GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, protocol_version % 10));
- GUARD(s2n_stuffer_write_uint16(&ctrl_stuffer, payload_and_eiv_len));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&ctrl_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, content_type));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, protocol_version / 10));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, protocol_version % 10));
+ POSIX_GUARD(s2n_stuffer_write_uint16(&ctrl_stuffer, payload_and_eiv_len));
/* This will unnecessarily mangle the input buffer, which is fine since it's temporary
* Return value will be length of digest, padding, and padding length byte.
@@ -150,7 +150,7 @@ static int s2n_composite_cipher_aes_sha_initial_hmac(struct s2n_session_key *key
*/
int ctrl_ret = EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_TLS1_AAD, S2N_TLS12_AAD_LEN, ctrl_buf);
- S2N_ERROR_IF(ctrl_ret < 0, S2N_ERR_INITIAL_HMAC);
+ S2N_ERROR_IF(ctrl_ret <= 0, S2N_ERR_INITIAL_HMAC);
*extra = ctrl_ret;
return 0;
@@ -159,27 +159,27 @@ static int s2n_composite_cipher_aes_sha_initial_hmac(struct s2n_session_key *key
static int s2n_composite_cipher_aes_sha_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out)
{
- eq_check(out->size, in->size);
+ POSIX_ENSURE_EQ(out->size, in->size);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
- GUARD_OSSL(EVP_Cipher(key->evp_cipher_ctx, out->data, in->data, in->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_Cipher(key->evp_cipher_ctx, out->data, in->data, in->size), S2N_ERR_ENCRYPT);
return 0;
}
static int s2n_composite_cipher_aes_sha_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out)
{
- eq_check(out->size, in->size);
+ POSIX_ENSURE_EQ(out->size, in->size);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
- GUARD_OSSL(EVP_Cipher(key->evp_cipher_ctx, out->data, in->data, in->size), S2N_ERR_DECRYPT);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT);
+ POSIX_GUARD_OSSL(EVP_Cipher(key->evp_cipher_ctx, out->data, in->data, in->size), S2N_ERR_DECRYPT);
return 0;
}
static int s2n_composite_cipher_aes_sha_set_mac_write_key(struct s2n_session_key *key, uint8_t *mac_key, uint32_t mac_size)
{
- eq_check(mac_size, SHA_DIGEST_LENGTH);
+ POSIX_ENSURE_EQ(mac_size, SHA_DIGEST_LENGTH);
EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_SET_MAC_KEY, mac_size, mac_key);
@@ -188,7 +188,7 @@ static int s2n_composite_cipher_aes_sha_set_mac_write_key(struct s2n_session_key
static int s2n_composite_cipher_aes_sha256_set_mac_write_key(struct s2n_session_key *key, uint8_t *mac_key, uint32_t mac_size)
{
- eq_check(mac_size, SHA256_DIGEST_LENGTH);
+ POSIX_ENSURE_EQ(mac_size, SHA256_DIGEST_LENGTH);
EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_SET_MAC_KEY, mac_size, mac_key);
@@ -198,7 +198,7 @@ static int s2n_composite_cipher_aes_sha256_set_mac_write_key(struct s2n_session_
static int s2n_composite_cipher_aes128_sha_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 16);
+ POSIX_ENSURE_EQ(in->size, 16);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_128_cbc_hmac_sha1(), NULL, in->data, NULL);
@@ -208,7 +208,7 @@ static int s2n_composite_cipher_aes128_sha_set_encryption_key(struct s2n_session
static int s2n_composite_cipher_aes128_sha_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 16);
+ POSIX_ENSURE_EQ(in->size, 16);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_128_cbc_hmac_sha1(), NULL, in->data, NULL);
@@ -218,7 +218,7 @@ static int s2n_composite_cipher_aes128_sha_set_decryption_key(struct s2n_session
static int s2n_composite_cipher_aes256_sha_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 32);
+ POSIX_ENSURE_EQ(in->size, 32);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_256_cbc_hmac_sha1(), NULL, in->data, NULL);
@@ -228,7 +228,7 @@ static int s2n_composite_cipher_aes256_sha_set_encryption_key(struct s2n_session
static int s2n_composite_cipher_aes256_sha_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 32);
+ POSIX_ENSURE_EQ(in->size, 32);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_256_cbc_hmac_sha1(), NULL, in->data, NULL);
@@ -238,7 +238,7 @@ static int s2n_composite_cipher_aes256_sha_set_decryption_key(struct s2n_session
static int s2n_composite_cipher_aes128_sha256_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 16);
+ POSIX_ENSURE_EQ(in->size, 16);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_128_cbc_hmac_sha256(), NULL, in->data, NULL);
@@ -248,7 +248,7 @@ static int s2n_composite_cipher_aes128_sha256_set_encryption_key(struct s2n_sess
static int s2n_composite_cipher_aes128_sha256_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 16);
+ POSIX_ENSURE_EQ(in->size, 16);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_128_cbc_hmac_sha256(), NULL, in->data, NULL);
@@ -258,7 +258,7 @@ static int s2n_composite_cipher_aes128_sha256_set_decryption_key(struct s2n_sess
static int s2n_composite_cipher_aes256_sha256_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 32);
+ POSIX_ENSURE_EQ(in->size, 32);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_256_cbc_hmac_sha256(), NULL, in->data, NULL);
@@ -268,7 +268,7 @@ static int s2n_composite_cipher_aes256_sha256_set_encryption_key(struct s2n_sess
static int s2n_composite_cipher_aes256_sha256_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 32);
+ POSIX_ENSURE_EQ(in->size, 32);
EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, EVP_CIPH_NO_PADDING);
EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_256_cbc_hmac_sha256(), NULL, in->data, NULL);
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_crypto.c b/contrib/restricted/aws/s2n/crypto/s2n_crypto.c
new file mode 100644
index 0000000000..d9062f0578
--- /dev/null
+++ b/contrib/restricted/aws/s2n/crypto/s2n_crypto.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "api/s2n.h"
+#include "crypto/s2n_crypto.h"
+
+/* OPENSSL_free is defined within <openssl/crypto.h> for OpenSSL Libcrypto
+ * and within <openssl/mem.h> for AWS_LC and BoringSSL */
+#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
+#error #include <openssl/mem.h>
+#else
+#include <openssl/crypto.h>
+#endif
+
+int s2n_crypto_free(uint8_t** data)
+{
+ if (*data != NULL) {
+ OPENSSL_free(*data);
+ }
+ return S2N_SUCCESS;
+}
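
The new s2n_crypto_free() takes a pointer-to-pointer so it can serve as a cleanup hook for buffers allocated by libcrypto. A sketch of that intent using the plain GCC/Clang cleanup attribute — this is illustrative only; s2n itself wires such hooks up through its DEFER_CLEANUP machinery rather than the raw attribute:

#include <stdint.h>
#include <openssl/crypto.h>

/* Sketch: buf is released through s2n_crypto_free(&buf) automatically when
 * it goes out of scope; a NULL pointer is tolerated by the hook. */
void example(void)
{
    __attribute__((cleanup(s2n_crypto_free))) uint8_t *buf = OPENSSL_malloc(16);
    if (buf == NULL) {
        return;
    }
    /* ... use buf ... */
}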
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_crypto.h b/contrib/restricted/aws/s2n/crypto/s2n_crypto.h
index 86e029c361..8c5fc04266 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_crypto.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_crypto.h
@@ -15,8 +15,12 @@
#pragma once
+#include <stdint.h>
+
#include <openssl/aes.h>
#include <openssl/rc4.h>
#include <openssl/des.h>
#include <openssl/rsa.h>
#include <openssl/dh.h>
+
+int s2n_crypto_free(uint8_t** data);
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_dhe.c b/contrib/restricted/aws/s2n/crypto/s2n_dhe.c
index 9304a4b46f..9f6bc31249 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_dhe.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_dhe.c
@@ -72,14 +72,14 @@ static const BIGNUM *s2n_get_g_dh_param(struct s2n_dh_params *dh_params)
static int s2n_check_p_g_dh_params(struct s2n_dh_params *dh_params)
{
- notnull_check(dh_params);
- notnull_check(dh_params->dh);
+ POSIX_ENSURE_REF(dh_params);
+ POSIX_ENSURE_REF(dh_params->dh);
const BIGNUM *p = s2n_get_p_dh_param(dh_params);
const BIGNUM *g = s2n_get_g_dh_param(dh_params);
- notnull_check(g);
- notnull_check(p);
+ POSIX_ENSURE_REF(g);
+ POSIX_ENSURE_REF(p);
S2N_ERROR_IF(DH_size(dh_params->dh) < S2N_MIN_DH_PRIME_SIZE_BYTES, S2N_ERR_DH_PARAMS_CREATE);
S2N_ERROR_IF(BN_is_zero(g), S2N_ERR_DH_PARAMS_CREATE);
@@ -92,7 +92,7 @@ static int s2n_check_pub_key_dh_params(struct s2n_dh_params *dh_params)
{
const BIGNUM *pub_key = s2n_get_Ys_dh_param(dh_params);
- notnull_check(pub_key);
+ POSIX_ENSURE_REF(pub_key);
S2N_ERROR_IF(BN_is_zero(pub_key), S2N_ERR_DH_PARAMS_CREATE);
@@ -102,9 +102,9 @@ static int s2n_check_pub_key_dh_params(struct s2n_dh_params *dh_params)
static int s2n_set_p_g_Ys_dh_params(struct s2n_dh_params *dh_params, struct s2n_blob *p, struct s2n_blob *g,
struct s2n_blob *Ys)
{
- ENSURE_POSIX(p->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW);
- ENSURE_POSIX(g->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW);
- ENSURE_POSIX(Ys->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(p->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(g->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(Ys->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW);
BIGNUM *bn_p = BN_bin2bn(( const unsigned char * )p->data, p->size, NULL);
BIGNUM *bn_g = BN_bin2bn(( const unsigned char * )g->data, g->size, NULL);
BIGNUM *bn_Ys = BN_bin2bn(( const unsigned char * )Ys->data, Ys->size, NULL);
@@ -113,10 +113,10 @@ static int s2n_set_p_g_Ys_dh_params(struct s2n_dh_params *dh_params, struct s2n_
/* Per https://www.openssl.org/docs/man1.1.0/crypto/DH_get0_pqg.html:
* values that have been passed in should not be freed directly after this function has been called
*/
- GUARD_OSSL(DH_set0_pqg(dh_params->dh, bn_p, NULL, bn_g), S2N_ERR_DH_PARAMS_CREATE);
+ POSIX_GUARD_OSSL(DH_set0_pqg(dh_params->dh, bn_p, NULL, bn_g), S2N_ERR_DH_PARAMS_CREATE);
/* Same as DH_set0_pqg */
- GUARD_OSSL(DH_set0_key(dh_params->dh, bn_Ys, NULL), S2N_ERR_DH_PARAMS_CREATE);
+ POSIX_GUARD_OSSL(DH_set0_key(dh_params->dh, bn_Ys, NULL), S2N_ERR_DH_PARAMS_CREATE);
#else
dh_params->dh->p = bn_p;
dh_params->dh->g = bn_g;
@@ -128,34 +128,34 @@ static int s2n_set_p_g_Ys_dh_params(struct s2n_dh_params *dh_params, struct s2n_
int s2n_check_all_dh_params(struct s2n_dh_params *dh_params)
{
- GUARD(s2n_check_p_g_dh_params(dh_params));
- GUARD(s2n_check_pub_key_dh_params(dh_params));
+ POSIX_GUARD(s2n_check_p_g_dh_params(dh_params));
+ POSIX_GUARD(s2n_check_pub_key_dh_params(dh_params));
return S2N_SUCCESS;
}
int s2n_pkcs3_to_dh_params(struct s2n_dh_params *dh_params, struct s2n_blob *pkcs3)
{
- notnull_check(dh_params);
- PRECONDITION_POSIX(s2n_blob_validate(pkcs3));
+ POSIX_ENSURE_REF(dh_params);
+ POSIX_PRECONDITION(s2n_blob_validate(pkcs3));
uint8_t *original_ptr = pkcs3->data;
dh_params->dh = d2i_DHparams(NULL, ( const unsigned char ** )( void * )&pkcs3->data, pkcs3->size);
- GUARD(s2n_check_p_g_dh_params(dh_params));
- if (pkcs3->data - original_ptr != pkcs3->size) {
+ POSIX_GUARD(s2n_check_p_g_dh_params(dh_params));
+ if (pkcs3->data && (pkcs3->data - original_ptr != pkcs3->size)) {
DH_free(dh_params->dh);
- S2N_ERROR(S2N_ERR_INVALID_PKCS3);
+ POSIX_BAIL(S2N_ERR_INVALID_PKCS3);
}
pkcs3->data = original_ptr;
/* Require at least 2048 bits for the DH size */
if (DH_size(dh_params->dh) < S2N_MIN_DH_PRIME_SIZE_BYTES) {
DH_free(dh_params->dh);
- S2N_ERROR(S2N_ERR_DH_TOO_SMALL);
+ POSIX_BAIL(S2N_ERR_DH_TOO_SMALL);
}
/* Check the generator and prime */
- GUARD(s2n_dh_params_check(dh_params));
+ POSIX_GUARD(s2n_dh_params_check(dh_params));
return S2N_SUCCESS;
}
@@ -163,25 +163,25 @@ int s2n_pkcs3_to_dh_params(struct s2n_dh_params *dh_params, struct s2n_blob *pkc
int s2n_dh_p_g_Ys_to_dh_params(struct s2n_dh_params *server_dh_params, struct s2n_blob *p, struct s2n_blob *g,
struct s2n_blob *Ys)
{
- ENSURE_POSIX_REF(server_dh_params);
- PRECONDITION_POSIX(s2n_blob_validate(p));
- PRECONDITION_POSIX(s2n_blob_validate(g));
- PRECONDITION_POSIX(s2n_blob_validate(Ys));
+ POSIX_ENSURE_REF(server_dh_params);
+ POSIX_PRECONDITION(s2n_blob_validate(p));
+ POSIX_PRECONDITION(s2n_blob_validate(g));
+ POSIX_PRECONDITION(s2n_blob_validate(Ys));
server_dh_params->dh = DH_new();
- ENSURE_POSIX(server_dh_params->dh != NULL, S2N_ERR_DH_PARAMS_CREATE);
+ POSIX_ENSURE(server_dh_params->dh != NULL, S2N_ERR_DH_PARAMS_CREATE);
- GUARD(s2n_set_p_g_Ys_dh_params(server_dh_params, p, g, Ys));
- GUARD(s2n_check_all_dh_params(server_dh_params));
+ POSIX_GUARD(s2n_set_p_g_Ys_dh_params(server_dh_params, p, g, Ys));
+ POSIX_GUARD(s2n_check_all_dh_params(server_dh_params));
return S2N_SUCCESS;
}
int s2n_dh_params_to_p_g_Ys(struct s2n_dh_params *server_dh_params, struct s2n_stuffer *out, struct s2n_blob *output)
{
- GUARD(s2n_check_all_dh_params(server_dh_params));
- PRECONDITION_POSIX(s2n_stuffer_validate(out));
- PRECONDITION_POSIX(s2n_blob_validate(output));
+ POSIX_GUARD(s2n_check_all_dh_params(server_dh_params));
+ POSIX_PRECONDITION(s2n_stuffer_validate(out));
+ POSIX_PRECONDITION(s2n_blob_validate(output));
const BIGNUM *bn_p = s2n_get_p_dh_param(server_dh_params);
const BIGNUM *bn_g = s2n_get_g_dh_param(server_dh_params);
@@ -195,22 +195,22 @@ int s2n_dh_params_to_p_g_Ys(struct s2n_dh_params *server_dh_params, struct s2n_s
uint8_t *Ys = NULL;
output->data = s2n_stuffer_raw_write(out, 0);
- notnull_check(output->data);
+ POSIX_ENSURE_REF(output->data);
- GUARD(s2n_stuffer_write_uint16(out, p_size));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, p_size));
p = s2n_stuffer_raw_write(out, p_size);
- notnull_check(p);
- ENSURE_POSIX(BN_bn2bin(bn_p, p) == p_size, S2N_ERR_DH_SERIALIZING);
+ POSIX_ENSURE_REF(p);
+ POSIX_ENSURE(BN_bn2bin(bn_p, p) == p_size, S2N_ERR_DH_SERIALIZING);
- GUARD(s2n_stuffer_write_uint16(out, g_size));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, g_size));
g = s2n_stuffer_raw_write(out, g_size);
- notnull_check(g);
- ENSURE_POSIX(BN_bn2bin(bn_g, g) == g_size, S2N_ERR_DH_SERIALIZING);
+ POSIX_ENSURE_REF(g);
+ POSIX_ENSURE(BN_bn2bin(bn_g, g) == g_size, S2N_ERR_DH_SERIALIZING);
- GUARD(s2n_stuffer_write_uint16(out, Ys_size));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, Ys_size));
Ys = s2n_stuffer_raw_write(out, Ys_size);
- notnull_check(Ys);
- ENSURE_POSIX(BN_bn2bin(bn_Ys, Ys) == Ys_size, S2N_ERR_DH_SERIALIZING);
+ POSIX_ENSURE_REF(Ys);
+ POSIX_ENSURE(BN_bn2bin(bn_Ys, Ys) == Ys_size, S2N_ERR_DH_SERIALIZING);
output->size = p_size + 2 + g_size + 2 + Ys_size + 2;
@@ -225,40 +225,40 @@ int s2n_dh_compute_shared_secret_as_client(struct s2n_dh_params *server_dh_param
uint16_t client_pub_key_size = 0;
int shared_key_size = 0;
- GUARD(s2n_dh_params_check(server_dh_params));
- GUARD(s2n_dh_params_copy(server_dh_params, &client_params));
- GUARD(s2n_dh_generate_ephemeral_key(&client_params));
- GUARD(s2n_alloc(shared_key, DH_size(server_dh_params->dh)));
+ POSIX_GUARD(s2n_dh_params_check(server_dh_params));
+ POSIX_GUARD(s2n_dh_params_copy(server_dh_params, &client_params));
+ POSIX_GUARD(s2n_dh_generate_ephemeral_key(&client_params));
+ POSIX_GUARD(s2n_alloc(shared_key, DH_size(server_dh_params->dh)));
const BIGNUM *client_pub_key_bn = s2n_get_Ys_dh_param(&client_params);
- ENSURE_POSIX_REF(client_pub_key_bn);
+ POSIX_ENSURE_REF(client_pub_key_bn);
client_pub_key_size = BN_num_bytes(client_pub_key_bn);
- GUARD(s2n_stuffer_write_uint16(Yc_out, client_pub_key_size));
+ POSIX_GUARD(s2n_stuffer_write_uint16(Yc_out, client_pub_key_size));
client_pub_key = s2n_stuffer_raw_write(Yc_out, client_pub_key_size);
if (client_pub_key == NULL) {
- GUARD(s2n_free(shared_key));
- GUARD(s2n_dh_params_free(&client_params));
- S2N_ERROR(S2N_ERR_DH_WRITING_PUBLIC_KEY);
+ POSIX_GUARD(s2n_free(shared_key));
+ POSIX_GUARD(s2n_dh_params_free(&client_params));
+ POSIX_BAIL(S2N_ERR_DH_WRITING_PUBLIC_KEY);
}
if (BN_bn2bin(client_pub_key_bn, client_pub_key) != client_pub_key_size) {
- GUARD(s2n_free(shared_key));
- GUARD(s2n_dh_params_free(&client_params));
- S2N_ERROR(S2N_ERR_DH_COPYING_PUBLIC_KEY);
+ POSIX_GUARD(s2n_free(shared_key));
+ POSIX_GUARD(s2n_dh_params_free(&client_params));
+ POSIX_BAIL(S2N_ERR_DH_COPYING_PUBLIC_KEY);
}
/* server_dh_params already validated */
const BIGNUM *server_pub_key_bn = s2n_get_Ys_dh_param(server_dh_params);
shared_key_size = DH_compute_key(shared_key->data, server_pub_key_bn, client_params.dh);
if (shared_key_size < 0) {
- GUARD(s2n_free(shared_key));
- GUARD(s2n_dh_params_free(&client_params));
- S2N_ERROR(S2N_ERR_DH_SHARED_SECRET);
+ POSIX_GUARD(s2n_free(shared_key));
+ POSIX_GUARD(s2n_dh_params_free(&client_params));
+ POSIX_BAIL(S2N_ERR_DH_SHARED_SECRET);
}
shared_key->size = shared_key_size;
- GUARD(s2n_dh_params_free(&client_params));
+ POSIX_GUARD(s2n_dh_params_free(&client_params));
return S2N_SUCCESS;
}
@@ -271,23 +271,23 @@ int s2n_dh_compute_shared_secret_as_server(struct s2n_dh_params *server_dh_param
int shared_key_size = 0;
BIGNUM * pub_key = NULL;
- GUARD(s2n_check_all_dh_params(server_dh_params));
+ POSIX_GUARD(s2n_check_all_dh_params(server_dh_params));
- GUARD(s2n_stuffer_read_uint16(Yc_in, &Yc_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(Yc_in, &Yc_length));
Yc.size = Yc_length;
Yc.data = s2n_stuffer_raw_read(Yc_in, Yc.size);
- notnull_check(Yc.data);
+ POSIX_ENSURE_REF(Yc.data);
pub_key = BN_bin2bn(( const unsigned char * )Yc.data, Yc.size, NULL);
- notnull_check(pub_key);
+ POSIX_ENSURE_REF(pub_key);
int server_dh_params_size = DH_size(server_dh_params->dh);
- ENSURE_POSIX(server_dh_params_size <= INT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
- GUARD(s2n_alloc(shared_key, server_dh_params_size));
+ POSIX_ENSURE(server_dh_params_size <= INT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_GUARD(s2n_alloc(shared_key, server_dh_params_size));
shared_key_size = DH_compute_key(shared_key->data, pub_key, server_dh_params->dh);
if (shared_key_size <= 0) {
BN_free(pub_key);
- S2N_ERROR(S2N_ERR_DH_SHARED_SECRET);
+ POSIX_BAIL(S2N_ERR_DH_SHARED_SECRET);
}
shared_key->size = shared_key_size;
@@ -299,39 +299,39 @@ int s2n_dh_compute_shared_secret_as_server(struct s2n_dh_params *server_dh_param
int s2n_dh_params_check(struct s2n_dh_params *dh_params)
{
- notnull_check(dh_params);
- notnull_check(dh_params->dh);
+ POSIX_ENSURE_REF(dh_params);
+ POSIX_ENSURE_REF(dh_params->dh);
int codes = 0;
- GUARD_OSSL(DH_check(dh_params->dh, &codes), S2N_ERR_DH_PARAMETER_CHECK);
- ENSURE_POSIX(codes == 0, S2N_ERR_DH_PARAMETER_CHECK);
+ POSIX_GUARD_OSSL(DH_check(dh_params->dh, &codes), S2N_ERR_DH_PARAMETER_CHECK);
+ POSIX_ENSURE(codes == 0, S2N_ERR_DH_PARAMETER_CHECK);
return S2N_SUCCESS;
}
int s2n_dh_params_copy(struct s2n_dh_params *from, struct s2n_dh_params *to)
{
- GUARD(s2n_check_p_g_dh_params(from));
- notnull_check(to);
+ POSIX_GUARD(s2n_check_p_g_dh_params(from));
+ POSIX_ENSURE_REF(to);
to->dh = DHparams_dup(from->dh);
- ENSURE_POSIX(to->dh != NULL, S2N_ERR_DH_COPYING_PARAMETERS);
+ POSIX_ENSURE(to->dh != NULL, S2N_ERR_DH_COPYING_PARAMETERS);
return S2N_SUCCESS;
}
int s2n_dh_generate_ephemeral_key(struct s2n_dh_params *dh_params)
{
- GUARD(s2n_check_p_g_dh_params(dh_params));
+ POSIX_GUARD(s2n_check_p_g_dh_params(dh_params));
- GUARD_OSSL(DH_generate_key(dh_params->dh), S2N_ERR_DH_GENERATING_PARAMETERS);
+ POSIX_GUARD_OSSL(DH_generate_key(dh_params->dh), S2N_ERR_DH_GENERATING_PARAMETERS);
return S2N_SUCCESS;
}
int s2n_dh_params_free(struct s2n_dh_params *dh_params)
{
- notnull_check(dh_params);
+ POSIX_ENSURE_REF(dh_params);
DH_free(dh_params->dh);
dh_params->dh = NULL;
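
Almost all of the s2n_dhe.c changes are a mechanical rename from the old unprefixed safety macros (GUARD, GUARD_OSSL, notnull_check, ENSURE_POSIX, S2N_ERROR) to their POSIX_-prefixed successors. Roughly what the new names mean, as a sketch only; the real definitions live in utils/s2n_safety.h and carry additional bookkeeping:

    /* Illustrative approximations, not the actual utils/s2n_safety.h definitions. */
    #define POSIX_BAIL(error)             do { s2n_errno = (error); return S2N_FAILURE; } while (0)
    #define POSIX_ENSURE(cond, error)     do { if (!(cond)) { POSIX_BAIL(error); } } while (0)
    #define POSIX_ENSURE_REF(ptr)         POSIX_ENSURE((ptr) != NULL, S2N_ERR_NULL)
    #define POSIX_GUARD(call)             do { if ((call) < S2N_SUCCESS) { return S2N_FAILURE; } } while (0)
    #define POSIX_GUARD_OSSL(call, error) POSIX_ENSURE((call) == 1, (error))

Apart from the added null check on pkcs3->data in s2n_pkcs3_to_dh_params, the behaviour of the patched DHE functions is unchanged by the rename.
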
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_drbg.c b/contrib/restricted/aws/s2n/crypto/s2n_drbg.c
index abcd819e04..242fd5ab9c 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_drbg.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_drbg.c
@@ -23,206 +23,234 @@
#include "utils/s2n_random.h"
#include "utils/s2n_blob.h"
+static bool ignore_prediction_resistance_for_testing = false;
+
#define s2n_drbg_key_size(drgb) EVP_CIPHER_CTX_key_length((drbg)->ctx)
#define s2n_drbg_seed_size(drgb) (S2N_DRBG_BLOCK_SIZE + s2n_drbg_key_size(drgb))
/* This function is the same as s2n_increment_sequence_number
but it does not check for overflow, since overflow is
acceptable in DRBG */
-int s2n_increment_drbg_counter(struct s2n_blob *counter)
+S2N_RESULT s2n_increment_drbg_counter(struct s2n_blob *counter)
{
- for (int i = counter->size - 1; i >= 0; i--) {
- counter->data[i] += 1;
- if (counter->data[i]) {
+ for (uint32_t i = counter->size; i > 0; i--) {
+ counter->data[i-1] += 1;
+ if (counter->data[i-1]) {
break;
}
/* seq[i] wrapped, so let it carry */
}
- return 0;
+ return S2N_RESULT_OK;
}
-static int s2n_drbg_block_encrypt(EVP_CIPHER_CTX * ctx, uint8_t in[S2N_DRBG_BLOCK_SIZE], uint8_t out[S2N_DRBG_BLOCK_SIZE])
+static S2N_RESULT s2n_drbg_block_encrypt(EVP_CIPHER_CTX *ctx, uint8_t in[S2N_DRBG_BLOCK_SIZE], uint8_t out[S2N_DRBG_BLOCK_SIZE])
{
- notnull_check(ctx);
+ RESULT_ENSURE_REF(ctx);
+
int len = S2N_DRBG_BLOCK_SIZE;
- GUARD_OSSL(EVP_EncryptUpdate(ctx, out, &len, in, S2N_DRBG_BLOCK_SIZE), S2N_ERR_DRBG);
- eq_check(len, S2N_DRBG_BLOCK_SIZE);
+ RESULT_GUARD_OSSL(EVP_EncryptUpdate(ctx, out, &len, in, S2N_DRBG_BLOCK_SIZE), S2N_ERR_DRBG);
+ RESULT_ENSURE_EQ(len, S2N_DRBG_BLOCK_SIZE);
- return 0;
+ return S2N_RESULT_OK;
}
-static int s2n_drbg_bits(struct s2n_drbg *drbg, struct s2n_blob *out)
+static S2N_RESULT s2n_drbg_bits(struct s2n_drbg *drbg, struct s2n_blob *out)
{
- notnull_check(drbg);
- notnull_check(drbg->ctx);
- notnull_check(out);
+ RESULT_ENSURE_REF(drbg);
+ RESULT_ENSURE_REF(drbg->ctx);
+ RESULT_ENSURE_REF(out);
struct s2n_blob value = {0};
- GUARD(s2n_blob_init(&value, drbg->v, sizeof(drbg->v)));
+ RESULT_GUARD_POSIX(s2n_blob_init(&value, drbg->v, sizeof(drbg->v)));
int block_aligned_size = out->size - (out->size % S2N_DRBG_BLOCK_SIZE);
/* Per NIST SP800-90A 10.2.1.2: */
for (int i = 0; i < block_aligned_size; i += S2N_DRBG_BLOCK_SIZE) {
- GUARD(s2n_increment_drbg_counter(&value));
- GUARD(s2n_drbg_block_encrypt(drbg->ctx, drbg->v, out->data + i));
+ RESULT_GUARD(s2n_increment_drbg_counter(&value));
+ RESULT_GUARD(s2n_drbg_block_encrypt(drbg->ctx, drbg->v, out->data + i));
drbg->bytes_used += S2N_DRBG_BLOCK_SIZE;
}
if (out->size <= block_aligned_size) {
- return 0;
+ return S2N_RESULT_OK;
}
uint8_t spare_block[S2N_DRBG_BLOCK_SIZE];
- GUARD(s2n_increment_drbg_counter(&value));
- GUARD(s2n_drbg_block_encrypt(drbg->ctx, drbg->v, spare_block));
+ RESULT_GUARD(s2n_increment_drbg_counter(&value));
+ RESULT_GUARD(s2n_drbg_block_encrypt(drbg->ctx, drbg->v, spare_block));
drbg->bytes_used += S2N_DRBG_BLOCK_SIZE;
- memcpy_check(out->data + block_aligned_size, spare_block, out->size - block_aligned_size);
+ RESULT_CHECKED_MEMCPY(out->data + block_aligned_size, spare_block, out->size - block_aligned_size);
- return 0;
+ return S2N_RESULT_OK;
}
-static int s2n_drbg_update(struct s2n_drbg *drbg, struct s2n_blob *provided_data)
+static S2N_RESULT s2n_drbg_update(struct s2n_drbg *drbg, struct s2n_blob *provided_data)
{
- notnull_check(drbg);
- notnull_check(drbg->ctx);
+ RESULT_ENSURE_REF(drbg);
+ RESULT_ENSURE_REF(drbg->ctx);
+ RESULT_ENSURE_REF(provided_data);
- s2n_stack_blob(temp_blob, s2n_drbg_seed_size(drgb), S2N_DRBG_MAX_SEED_SIZE);
+ RESULT_STACK_BLOB(temp_blob, s2n_drbg_seed_size(drgb), S2N_DRBG_MAX_SEED_SIZE);
- eq_check(provided_data->size, s2n_drbg_seed_size(drbg));
+ RESULT_ENSURE_EQ(provided_data->size, s2n_drbg_seed_size(drbg));
- GUARD(s2n_drbg_bits(drbg, &temp_blob));
+ RESULT_GUARD(s2n_drbg_bits(drbg, &temp_blob));
/* XOR in the provided data */
- for (int i = 0; i < provided_data->size; i++) {
+ for (uint32_t i = 0; i < provided_data->size; i++) {
temp_blob.data[i] ^= provided_data->data[i];
}
/* Update the key and value */
- GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, NULL, NULL, temp_blob.data, NULL), S2N_ERR_DRBG);
+ RESULT_GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, NULL, NULL, temp_blob.data, NULL), S2N_ERR_DRBG);
- memcpy_check(drbg->v, temp_blob.data + s2n_drbg_key_size(drbg), S2N_DRBG_BLOCK_SIZE);
+ RESULT_CHECKED_MEMCPY(drbg->v, temp_blob.data + s2n_drbg_key_size(drbg), S2N_DRBG_BLOCK_SIZE);
- return 0;
+ return S2N_RESULT_OK;
}
-static int s2n_drbg_mix_in_entropy(struct s2n_drbg *drbg, struct s2n_blob *entropy, struct s2n_blob *ps)
+static S2N_RESULT s2n_drbg_mix_in_entropy(struct s2n_drbg *drbg, struct s2n_blob *entropy, struct s2n_blob *ps)
{
- notnull_check(drbg);
- notnull_check(drbg->ctx);
- notnull_check(entropy);
+ RESULT_ENSURE_REF(drbg);
+ RESULT_ENSURE_REF(drbg->ctx);
+ RESULT_ENSURE_REF(entropy);
- gte_check(entropy->size, ps->size);
+ RESULT_ENSURE_GTE(entropy->size, ps->size);
- for (int i = 0; i < ps->size; i++) {
+ for (uint32_t i = 0; i < ps->size; i++) {
entropy->data[i] ^= ps->data[i];
}
- GUARD(s2n_drbg_update(drbg, entropy));
+ RESULT_GUARD(s2n_drbg_update(drbg, entropy));
- return 0;
+ return S2N_RESULT_OK;
}
-static int s2n_drbg_seed(struct s2n_drbg *drbg, struct s2n_blob *ps)
+static S2N_RESULT s2n_drbg_seed(struct s2n_drbg *drbg, struct s2n_blob *ps)
{
- s2n_stack_blob(blob, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
+ RESULT_STACK_BLOB(blob, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
- GUARD_AS_POSIX(s2n_get_seed_entropy(&blob));
- GUARD(s2n_drbg_mix_in_entropy(drbg, &blob, ps));
+ RESULT_GUARD(s2n_get_seed_entropy(&blob));
+ RESULT_GUARD(s2n_drbg_mix_in_entropy(drbg, &blob, ps));
drbg->bytes_used = 0;
- return 0;
+ return S2N_RESULT_OK;
}
-static int s2n_drbg_mix(struct s2n_drbg *drbg, struct s2n_blob *ps)
+static S2N_RESULT s2n_drbg_mix(struct s2n_drbg *drbg, struct s2n_blob *ps)
{
- s2n_stack_blob(blob, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
+ if (s2n_unlikely(ignore_prediction_resistance_for_testing)) {
+ RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+ return S2N_RESULT_OK;
+ }
+
+ RESULT_STACK_BLOB(blob, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
- GUARD_AS_POSIX(s2n_get_mix_entropy(&blob));
- GUARD(s2n_drbg_mix_in_entropy(drbg, &blob, ps));
+ RESULT_GUARD(s2n_get_mix_entropy(&blob));
+ RESULT_GUARD(s2n_drbg_mix_in_entropy(drbg, &blob, ps));
drbg->mixes += 1;
- return 0;
+ return S2N_RESULT_OK;
}
-int s2n_drbg_instantiate(struct s2n_drbg *drbg, struct s2n_blob *personalization_string, const s2n_drbg_mode mode)
+S2N_RESULT s2n_drbg_instantiate(struct s2n_drbg *drbg, struct s2n_blob *personalization_string, const s2n_drbg_mode mode)
{
- notnull_check(drbg);
+ RESULT_ENSURE_REF(drbg);
+ RESULT_ENSURE_REF(personalization_string);
drbg->ctx = EVP_CIPHER_CTX_new();
- S2N_ERROR_IF(!drbg->ctx, S2N_ERR_DRBG);
+ RESULT_GUARD_PTR(drbg->ctx);
- s2n_evp_ctx_init(drbg->ctx);
+ RESULT_EVP_CTX_INIT(drbg->ctx);
switch(mode) {
case S2N_AES_128_CTR_NO_DF_PR:
- GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, EVP_aes_128_ecb(), NULL, NULL, NULL), S2N_ERR_DRBG);
+ RESULT_GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, EVP_aes_128_ecb(), NULL, NULL, NULL), S2N_ERR_DRBG);
break;
case S2N_AES_256_CTR_NO_DF_PR:
- GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, EVP_aes_256_ecb(), NULL, NULL, NULL), S2N_ERR_DRBG);
+ RESULT_GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, EVP_aes_256_ecb(), NULL, NULL, NULL), S2N_ERR_DRBG);
break;
default:
- S2N_ERROR(S2N_ERR_DRBG);
+ RESULT_BAIL(S2N_ERR_DRBG);
}
- lte_check(s2n_drbg_key_size(drbg), S2N_DRBG_MAX_KEY_SIZE);
- lte_check(s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
+ RESULT_ENSURE_LTE(s2n_drbg_key_size(drbg), S2N_DRBG_MAX_KEY_SIZE);
+ RESULT_ENSURE_LTE(s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
static const uint8_t zero_key[S2N_DRBG_MAX_KEY_SIZE] = {0};
/* Start off with zeroed data, per 10.2.1.3.1 item 4 and 5 */
memset(drbg->v, 0, sizeof(drbg->v));
- GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, NULL, NULL, zero_key, NULL), S2N_ERR_DRBG);
+ RESULT_GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, NULL, NULL, zero_key, NULL), S2N_ERR_DRBG);
/* Copy the personalization string */
- s2n_stack_blob(ps, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
- GUARD(s2n_blob_zero(&ps));
+ RESULT_STACK_BLOB(ps, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
+ RESULT_GUARD_POSIX(s2n_blob_zero(&ps));
- memcpy_check(ps.data, personalization_string->data, MIN(ps.size, personalization_string->size));
+ RESULT_CHECKED_MEMCPY(ps.data, personalization_string->data, MIN(ps.size, personalization_string->size));
/* Seed the DRBG */
- GUARD(s2n_drbg_seed(drbg, &ps));
+ RESULT_GUARD(s2n_drbg_seed(drbg, &ps));
- return 0;
+ return S2N_RESULT_OK;
}
-int s2n_drbg_generate(struct s2n_drbg *drbg, struct s2n_blob *blob)
+S2N_RESULT s2n_drbg_generate(struct s2n_drbg *drbg, struct s2n_blob *blob)
{
- notnull_check(drbg);
- notnull_check(drbg->ctx);
- s2n_stack_blob(zeros, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
-
- S2N_ERROR_IF(blob->size > S2N_DRBG_GENERATE_LIMIT, S2N_ERR_DRBG_REQUEST_SIZE);
-
- /* Always mix in additional entropy, for prediction resistance */
- GUARD(s2n_drbg_mix(drbg, &zeros));
- GUARD(s2n_drbg_bits(drbg, blob));
- GUARD(s2n_drbg_update(drbg, &zeros));
-
- return 0;
+ RESULT_ENSURE_REF(drbg);
+ RESULT_ENSURE_REF(drbg->ctx);
+
+ RESULT_STACK_BLOB(zeros, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE);
+
+ RESULT_ENSURE(blob->size <= S2N_DRBG_GENERATE_LIMIT, S2N_ERR_DRBG_REQUEST_SIZE);
+
+ /* Mix in additional entropy for every randomness generation call. This
+ * defense mechanism is referred to as "prediction resistance".
+ * If we ever relax this defense, we must:
+ * 1. Implement reseeding according to limit specified in
+ * NIST SP800-90A 10.2.1 Table 3.
+ * 2. Re-consider whether the current fork detection strategy is still
+ * sufficient.
+ */
+ RESULT_GUARD(s2n_drbg_mix(drbg, &zeros));
+ RESULT_GUARD(s2n_drbg_bits(drbg, blob));
+ RESULT_GUARD(s2n_drbg_update(drbg, &zeros));
+
+ return S2N_RESULT_OK;
}
-int s2n_drbg_wipe(struct s2n_drbg *drbg)
+S2N_RESULT s2n_drbg_wipe(struct s2n_drbg *drbg)
{
- notnull_check(drbg);
+ RESULT_ENSURE_REF(drbg);
+
if (drbg->ctx) {
- GUARD_OSSL(EVP_CIPHER_CTX_cleanup(drbg->ctx), S2N_ERR_DRBG);
+ RESULT_GUARD_OSSL(EVP_CIPHER_CTX_cleanup(drbg->ctx), S2N_ERR_DRBG);
EVP_CIPHER_CTX_free(drbg->ctx);
drbg->ctx = NULL;
}
*drbg = (struct s2n_drbg) {0};
- return 0;
+ return S2N_RESULT_OK;
}
-int s2n_drbg_bytes_used(struct s2n_drbg *drbg, uint64_t *bytes_used)
+S2N_RESULT s2n_drbg_bytes_used(struct s2n_drbg *drbg, uint64_t *bytes_used)
{
- notnull_check(drbg);
- notnull_check(bytes_used);
+ RESULT_ENSURE_REF(drbg);
+ RESULT_ENSURE_REF(bytes_used);
+
*bytes_used = drbg->bytes_used;
- return 0;
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_ignore_prediction_resistance_for_testing(bool ignore_bool) {
+ RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+
+ ignore_prediction_resistance_for_testing = ignore_bool;
+
+ return S2N_RESULT_OK;
}
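
s2n_increment_drbg_counter() walks the big-endian counter from its least significant byte and, unlike the record sequence number routine it mirrors, deliberately lets a fully saturated counter wrap back to zero. A small standalone illustration of the carry behaviour (hypothetical test code, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Same carry logic as s2n_increment_drbg_counter, on a bare array. */
    static void increment_be_counter(uint8_t *counter, uint32_t size)
    {
        for (uint32_t i = size; i > 0; i--) {
            counter[i - 1] += 1;
            if (counter[i - 1]) {
                break; /* no carry needed */
            }
            /* this byte wrapped to zero, carry into the next more significant byte */
        }
    }

    int main(void)
    {
        uint8_t ctr[4] = { 0x00, 0x00, 0x01, 0xff };
        increment_be_counter(ctr, sizeof(ctr)); /* -> 00 00 02 00 */

        uint8_t max[4] = { 0xff, 0xff, 0xff, 0xff };
        increment_be_counter(max, sizeof(max)); /* wraps to 00 00 00 00, by design */

        printf("%02x%02x%02x%02x\n", ctr[0], ctr[1], ctr[2], ctr[3]);
        return 0;
    }
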
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_drbg.h b/contrib/restricted/aws/s2n/crypto/s2n_drbg.h
index 58562d17eb..f3a5661554 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_drbg.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_drbg.h
@@ -28,9 +28,6 @@
/* The maximum size of any one request: from NIST SP800-90A 10.2.1 Table 3 */
#define S2N_DRBG_GENERATE_LIMIT 8192
-/* We reseed after 2^35 bytes have been generated: from NIST SP800-90A 10.2.1 Table 3 */
-#define S2N_DRBG_RESEED_LIMIT 34359738368
-
struct s2n_drbg {
/* Track how many bytes have been used */
uint64_t bytes_used;
@@ -55,12 +52,15 @@ typedef enum {S2N_AES_128_CTR_NO_DF_PR, S2N_AES_256_CTR_NO_DF_PR} s2n_drbg_mode;
/* Per NIST SP 800-90C 6.3
*
- * s2n's DRBG does provide prediction resistance
- * and does not support the additional_input parameter (which per 800-90C may be zero).
+ * s2n's DRBG uses prediction resistance and does not support the
+ * additional_input parameter (which per 800-90C may be zero).
*
- * The security strength provided by s2n's DRBG is either 128 or 256 bits depending on the s2n_drbg_mode passed in.
+ * The security strength provided by s2n's DRBG is either 128 or 256 bits
+ * depending on the s2n_drbg_mode passed in.
*/
-extern int s2n_drbg_instantiate(struct s2n_drbg *drbg, struct s2n_blob *personalization_string, const s2n_drbg_mode mode);
-extern int s2n_drbg_generate(struct s2n_drbg *drbg, struct s2n_blob *returned_bits);
-extern int s2n_drbg_wipe(struct s2n_drbg *drbg);
-extern int s2n_drbg_bytes_used(struct s2n_drbg *drbg, uint64_t *bytes_used);
+S2N_RESULT s2n_drbg_instantiate(struct s2n_drbg *drbg, struct s2n_blob *personalization_string, const s2n_drbg_mode mode);
+S2N_RESULT s2n_drbg_generate(struct s2n_drbg *drbg, struct s2n_blob *returned_bits);
+S2N_RESULT s2n_drbg_wipe(struct s2n_drbg *drbg);
+S2N_RESULT s2n_drbg_bytes_used(struct s2n_drbg *drbg, uint64_t *bytes_used);
+/* Use for testing only */
+S2N_RESULT s2n_ignore_prediction_resistance_for_testing(bool true_or_false);
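
With the header change the DRBG entry points return S2N_RESULT instead of int, so internal callers consume them through the RESULT_* guards rather than comparing against zero. A sketch of a typical call site (hypothetical helper; the in-tree consumers sit in the utils/s2n_random code):

    /* Hypothetical helper showing the new calling convention. */
    static S2N_RESULT example_fill_with_random(struct s2n_drbg *drbg, uint8_t *buf, uint32_t len)
    {
        struct s2n_blob out = { 0 };
        RESULT_GUARD_POSIX(s2n_blob_init(&out, buf, len));
        RESULT_GUARD(s2n_drbg_generate(drbg, &out));
        return S2N_RESULT_OK;
    }
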
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_ecc_evp.c b/contrib/restricted/aws/s2n/crypto/s2n_ecc_evp.c
index 7ae2a73094..fa137ee1f0 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_ecc_evp.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_ecc_evp.c
@@ -89,6 +89,16 @@ const struct s2n_ecc_named_curve s2n_ecc_curve_x25519 = {
const struct s2n_ecc_named_curve s2n_ecc_curve_x25519 = {0};
#endif
+/* A fake / unsupported curve for use in triggering retries
+ * during testing.
+ */
+const struct s2n_ecc_named_curve s2n_unsupported_curve = {
+ .iana_id = 0, .name = "unsupported",
+ .libcrypto_nid = NID_X9_62_prime256v1,
+ .share_size = SECP256R1_SHARE_SIZE,
+ .generate_key = s2n_ecc_evp_generate_key_nist_curves,
+};
+
/* All curves that s2n supports. New curves MUST be added here.
* This list is a super set of all the curves present in s2n_ecc_preferences list.
*/
@@ -116,8 +126,8 @@ static int s2n_ecc_evp_generate_key_x25519(const struct s2n_ecc_named_curve *nam
EVP_PKEY_CTX_free_pointer);
S2N_ERROR_IF(pctx == NULL, S2N_ERR_ECDHE_GEN_KEY);
- GUARD_OSSL(EVP_PKEY_keygen_init(pctx), S2N_ERR_ECDHE_GEN_KEY);
- GUARD_OSSL(EVP_PKEY_keygen(pctx, evp_pkey), S2N_ERR_ECDHE_GEN_KEY);
+ POSIX_GUARD_OSSL(EVP_PKEY_keygen_init(pctx), S2N_ERR_ECDHE_GEN_KEY);
+ POSIX_GUARD_OSSL(EVP_PKEY_keygen(pctx, evp_pkey), S2N_ERR_ECDHE_GEN_KEY);
S2N_ERROR_IF(evp_pkey == NULL, S2N_ERR_ECDHE_GEN_KEY);
return 0;
@@ -129,42 +139,47 @@ static int s2n_ecc_evp_generate_key_nist_curves(const struct s2n_ecc_named_curve
DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL), EVP_PKEY_CTX_free_pointer);
S2N_ERROR_IF(pctx == NULL, S2N_ERR_ECDHE_GEN_KEY);
- GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_GEN_KEY);
- GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, named_curve->libcrypto_nid), S2N_ERR_ECDHE_GEN_KEY);
+ POSIX_GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_GEN_KEY);
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, named_curve->libcrypto_nid), S2N_ERR_ECDHE_GEN_KEY);
DEFER_CLEANUP(EVP_PKEY *params = NULL, EVP_PKEY_free_pointer);
- GUARD_OSSL(EVP_PKEY_paramgen(pctx, &params), S2N_ERR_ECDHE_GEN_KEY);
+ POSIX_GUARD_OSSL(EVP_PKEY_paramgen(pctx, &params), S2N_ERR_ECDHE_GEN_KEY);
S2N_ERROR_IF(params == NULL, S2N_ERR_ECDHE_GEN_KEY);
DEFER_CLEANUP(EVP_PKEY_CTX *kctx = EVP_PKEY_CTX_new(params, NULL), EVP_PKEY_CTX_free_pointer);
S2N_ERROR_IF(kctx == NULL, S2N_ERR_ECDHE_GEN_KEY);
- GUARD_OSSL(EVP_PKEY_keygen_init(kctx), S2N_ERR_ECDHE_GEN_KEY);
- GUARD_OSSL(EVP_PKEY_keygen(kctx, evp_pkey), S2N_ERR_ECDHE_GEN_KEY);
+ POSIX_GUARD_OSSL(EVP_PKEY_keygen_init(kctx), S2N_ERR_ECDHE_GEN_KEY);
+ POSIX_GUARD_OSSL(EVP_PKEY_keygen(kctx, evp_pkey), S2N_ERR_ECDHE_GEN_KEY);
S2N_ERROR_IF(evp_pkey == NULL, S2N_ERR_ECDHE_GEN_KEY);
return 0;
}
static int s2n_ecc_evp_generate_own_key(const struct s2n_ecc_named_curve *named_curve, EVP_PKEY **evp_pkey) {
- notnull_check(named_curve);
+ POSIX_ENSURE_REF(named_curve);
S2N_ERROR_IF(named_curve->generate_key == NULL, S2N_ERR_ECDHE_GEN_KEY);
return named_curve->generate_key(named_curve, evp_pkey);
}
static int s2n_ecc_evp_compute_shared_secret(EVP_PKEY *own_key, EVP_PKEY *peer_public, uint16_t iana_id, struct s2n_blob *shared_secret) {
- notnull_check(peer_public);
- notnull_check(own_key);
-
- /* From RFC 8446 Section 4.2.8.2: For the curves secp256r1 and secp384r1 peers MUST validate each other's
- * public value Q by ensuring that the point is a valid point on the elliptic curve.
- * For the curve x25519 the peer public-key validation check doesn't apply.
+ POSIX_ENSURE_REF(peer_public);
+ POSIX_ENSURE_REF(own_key);
+
+ /* From RFC 8446(TLS1.3) Section 4.2.8.2: For the curves secp256r1, secp384r1, and secp521r1, peers MUST validate
+ * each other's public value Q by ensuring that the point is a valid point on the elliptic curve.
+ * For the curve x25519 and x448 the peer public-key validation check doesn't apply.
+ * From RFC 8422(TLS1.2) Section 5.11: With the NIST curves, each party MUST validate the public key sent by its peer
+ * in the ClientKeyExchange and ServerKeyExchange messages. A receiving party MUST check that the x and y parameters from
+ * the peer's public value satisfy the curve equation, y^2 = x^3 + ax + b mod p.
+ * Note that the `EC_KEY_check_key` validation is a MUST for only NIST curves, if a non-NIST curve is added to s2n-tls
+ * this is an additional validation step that increases security but decreases performance.
*/
- if (iana_id == TLS_EC_CURVE_SECP_256_R1 || iana_id == TLS_EC_CURVE_SECP_384_R1) {
+ if (iana_id != TLS_EC_CURVE_ECDH_X25519 && iana_id != TLS_EC_CURVE_ECDH_X448) {
DEFER_CLEANUP(EC_KEY *ec_key = EVP_PKEY_get1_EC_KEY(peer_public), EC_KEY_free_pointer);
S2N_ERROR_IF(ec_key == NULL, S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
- GUARD_OSSL(EC_KEY_check_key(ec_key), S2N_ERR_ECDHE_SHARED_SECRET);
+ POSIX_GUARD_OSSL(EC_KEY_check_key(ec_key), S2N_ERR_ECDHE_SHARED_SECRET);
}
size_t shared_secret_size;
@@ -172,21 +187,21 @@ static int s2n_ecc_evp_compute_shared_secret(EVP_PKEY *own_key, EVP_PKEY *peer_p
DEFER_CLEANUP(EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(own_key, NULL), EVP_PKEY_CTX_free_pointer);
S2N_ERROR_IF(ctx == NULL, S2N_ERR_ECDHE_SHARED_SECRET);
- GUARD_OSSL(EVP_PKEY_derive_init(ctx), S2N_ERR_ECDHE_SHARED_SECRET);
- GUARD_OSSL(EVP_PKEY_derive_set_peer(ctx, peer_public), S2N_ERR_ECDHE_SHARED_SECRET);
- GUARD_OSSL(EVP_PKEY_derive(ctx, NULL, &shared_secret_size), S2N_ERR_ECDHE_SHARED_SECRET);
- GUARD(s2n_alloc(shared_secret, shared_secret_size));
+ POSIX_GUARD_OSSL(EVP_PKEY_derive_init(ctx), S2N_ERR_ECDHE_SHARED_SECRET);
+ POSIX_GUARD_OSSL(EVP_PKEY_derive_set_peer(ctx, peer_public), S2N_ERR_ECDHE_SHARED_SECRET);
+ POSIX_GUARD_OSSL(EVP_PKEY_derive(ctx, NULL, &shared_secret_size), S2N_ERR_ECDHE_SHARED_SECRET);
+ POSIX_GUARD(s2n_alloc(shared_secret, shared_secret_size));
if (EVP_PKEY_derive(ctx, shared_secret->data, &shared_secret_size) != 1) {
- GUARD(s2n_free(shared_secret));
- S2N_ERROR(S2N_ERR_ECDHE_SHARED_SECRET);
+ POSIX_GUARD(s2n_free(shared_secret));
+ POSIX_BAIL(S2N_ERR_ECDHE_SHARED_SECRET);
}
return 0;
}
int s2n_ecc_evp_generate_ephemeral_key(struct s2n_ecc_evp_params *ecc_evp_params) {
- notnull_check(ecc_evp_params->negotiated_curve);
+ POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve);
S2N_ERROR_IF(ecc_evp_params->evp_pkey != NULL, S2N_ERR_ECDHE_GEN_KEY);
S2N_ERROR_IF(s2n_ecc_evp_generate_own_key(ecc_evp_params->negotiated_curve, &ecc_evp_params->evp_pkey) != 0,
S2N_ERR_ECDHE_GEN_KEY);
@@ -197,44 +212,44 @@ int s2n_ecc_evp_generate_ephemeral_key(struct s2n_ecc_evp_params *ecc_evp_params
int s2n_ecc_evp_compute_shared_secret_from_params(struct s2n_ecc_evp_params *private_ecc_evp_params,
struct s2n_ecc_evp_params *public_ecc_evp_params,
struct s2n_blob *shared_key) {
- notnull_check(private_ecc_evp_params->negotiated_curve);
- notnull_check(private_ecc_evp_params->evp_pkey);
- notnull_check(public_ecc_evp_params->negotiated_curve);
- notnull_check(public_ecc_evp_params->evp_pkey);
+ POSIX_ENSURE_REF(private_ecc_evp_params->negotiated_curve);
+ POSIX_ENSURE_REF(private_ecc_evp_params->evp_pkey);
+ POSIX_ENSURE_REF(public_ecc_evp_params->negotiated_curve);
+ POSIX_ENSURE_REF(public_ecc_evp_params->evp_pkey);
S2N_ERROR_IF(private_ecc_evp_params->negotiated_curve->iana_id != public_ecc_evp_params->negotiated_curve->iana_id,
S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
- GUARD(s2n_ecc_evp_compute_shared_secret(private_ecc_evp_params->evp_pkey, public_ecc_evp_params->evp_pkey,
+ POSIX_GUARD(s2n_ecc_evp_compute_shared_secret(private_ecc_evp_params->evp_pkey, public_ecc_evp_params->evp_pkey,
private_ecc_evp_params->negotiated_curve->iana_id, shared_key));
return 0;
}
int s2n_ecc_evp_compute_shared_secret_as_server(struct s2n_ecc_evp_params *ecc_evp_params,
struct s2n_stuffer *Yc_in, struct s2n_blob *shared_key) {
- notnull_check(ecc_evp_params->negotiated_curve);
- notnull_check(ecc_evp_params->evp_pkey);
- notnull_check(Yc_in);
+ POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve);
+ POSIX_ENSURE_REF(ecc_evp_params->evp_pkey);
+ POSIX_ENSURE_REF(Yc_in);
uint8_t client_public_len;
struct s2n_blob client_public_blob = {0};
DEFER_CLEANUP(EVP_PKEY *peer_key = EVP_PKEY_new(), EVP_PKEY_free_pointer);
S2N_ERROR_IF(peer_key == NULL, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_stuffer_read_uint8(Yc_in, &client_public_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(Yc_in, &client_public_len));
client_public_blob.size = client_public_len;
client_public_blob.data = s2n_stuffer_raw_read(Yc_in, client_public_blob.size);
- notnull_check(client_public_blob.data);
+ POSIX_ENSURE_REF(client_public_blob.data);
#if EVP_APIS_SUPPORTED
if (ecc_evp_params->negotiated_curve->libcrypto_nid == NID_X25519) {
- GUARD(EVP_PKEY_set_type(peer_key, ecc_evp_params->negotiated_curve->libcrypto_nid));
+ POSIX_GUARD(EVP_PKEY_set_type(peer_key, ecc_evp_params->negotiated_curve->libcrypto_nid));
} else {
DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL), EVP_PKEY_CTX_free_pointer);
S2N_ERROR_IF(pctx == NULL, S2N_ERR_ECDHE_SERIALIZING);
- GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_SERIALIZING);
- GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, ecc_evp_params->negotiated_curve->libcrypto_nid), S2N_ERR_ECDHE_SERIALIZING);
- GUARD_OSSL(EVP_PKEY_paramgen(pctx, &peer_key), S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, ecc_evp_params->negotiated_curve->libcrypto_nid), S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_GUARD_OSSL(EVP_PKEY_paramgen(pctx, &peer_key), S2N_ERR_ECDHE_SERIALIZING);
}
- GUARD_OSSL(EVP_PKEY_set1_tls_encodedpoint(peer_key, client_public_blob.data, client_public_blob.size),
+ POSIX_GUARD_OSSL(EVP_PKEY_set1_tls_encodedpoint(peer_key, client_public_blob.data, client_public_blob.size),
S2N_ERR_ECDHE_SERIALIZING);
#else
DEFER_CLEANUP(EC_KEY *ec_key = EC_KEY_new_by_curve_name(ecc_evp_params->negotiated_curve->libcrypto_nid),
@@ -245,7 +260,7 @@ int s2n_ecc_evp_compute_shared_secret_as_server(struct s2n_ecc_evp_params *ecc_e
S2N_ERROR_IF(point == NULL, S2N_ERR_BAD_MESSAGE);
int success = EC_KEY_set_public_key(ec_key, point);
- GUARD_OSSL(EVP_PKEY_set1_EC_KEY(peer_key, ec_key), S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_GUARD_OSSL(EVP_PKEY_set1_EC_KEY(peer_key, ec_key), S2N_ERR_ECDHE_SERIALIZING);
S2N_ERROR_IF(success == 0, S2N_ERR_BAD_MESSAGE);
#endif
@@ -259,20 +274,20 @@ int s2n_ecc_evp_compute_shared_secret_as_client(struct s2n_ecc_evp_params *ecc_e
DEFER_CLEANUP(struct s2n_ecc_evp_params client_params = {0}, s2n_ecc_evp_params_free);
- notnull_check(ecc_evp_params->negotiated_curve);
+ POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve);
client_params.negotiated_curve = ecc_evp_params->negotiated_curve;
- GUARD(s2n_ecc_evp_generate_own_key(client_params.negotiated_curve, &client_params.evp_pkey));
+ POSIX_GUARD(s2n_ecc_evp_generate_own_key(client_params.negotiated_curve, &client_params.evp_pkey));
S2N_ERROR_IF(client_params.evp_pkey == NULL, S2N_ERR_ECDHE_GEN_KEY);
if (s2n_ecc_evp_compute_shared_secret(client_params.evp_pkey, ecc_evp_params->evp_pkey,
ecc_evp_params->negotiated_curve->iana_id, shared_key) != S2N_SUCCESS) {
- S2N_ERROR(S2N_ERR_ECDHE_SHARED_SECRET);
+ POSIX_BAIL(S2N_ERR_ECDHE_SHARED_SECRET);
}
- GUARD(s2n_stuffer_write_uint8(Yc_out, client_params.negotiated_curve->share_size));
+ POSIX_GUARD(s2n_stuffer_write_uint8(Yc_out, client_params.negotiated_curve->share_size));
if (s2n_ecc_evp_write_params_point(&client_params, Yc_out) != 0) {
- S2N_ERROR(S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_BAIL(S2N_ERR_ECDHE_SERIALIZING);
}
return 0;
@@ -297,50 +312,50 @@ static EC_POINT *s2n_ecc_evp_blob_to_point(struct s2n_blob *blob, const EC_KEY *
const EC_GROUP *group = EC_KEY_get0_group(ec_key);
EC_POINT *point = EC_POINT_new(group);
if (point == NULL) {
- S2N_ERROR_PTR(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ PTR_BAIL(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
}
if (EC_POINT_oct2point(group, point, blob->data, blob->size, NULL) != 1) {
EC_POINT_free(point);
- S2N_ERROR_PTR(S2N_ERR_BAD_MESSAGE);
+ PTR_BAIL(S2N_ERR_BAD_MESSAGE);
}
return point;
}
#endif
int s2n_ecc_evp_read_params_point(struct s2n_stuffer *in, int point_size, struct s2n_blob *point_blob) {
- notnull_check(in);
- notnull_check(point_blob);
- gte_check(point_size, 0);
+ POSIX_ENSURE_REF(in);
+ POSIX_ENSURE_REF(point_blob);
+ POSIX_ENSURE_GTE(point_size, 0);
/* Extract point from stuffer */
point_blob->size = point_size;
point_blob->data = s2n_stuffer_raw_read(in, point_size);
- notnull_check(point_blob->data);
+ POSIX_ENSURE_REF(point_blob->data);
return 0;
}
int s2n_ecc_evp_read_params(struct s2n_stuffer *in, struct s2n_blob *data_to_verify,
struct s2n_ecdhe_raw_server_params *raw_server_ecc_params) {
- notnull_check(in);
+ POSIX_ENSURE_REF(in);
uint8_t curve_type;
uint8_t point_length;
/* Remember where we started reading the data */
data_to_verify->data = s2n_stuffer_raw_read(in, 0);
- notnull_check(data_to_verify->data);
+ POSIX_ENSURE_REF(data_to_verify->data);
/* Read the curve */
- GUARD(s2n_stuffer_read_uint8(in, &curve_type));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &curve_type));
S2N_ERROR_IF(curve_type != TLS_EC_CURVE_TYPE_NAMED, S2N_ERR_BAD_MESSAGE);
raw_server_ecc_params->curve_blob.data = s2n_stuffer_raw_read(in, 2);
- notnull_check(raw_server_ecc_params->curve_blob.data);
+ POSIX_ENSURE_REF(raw_server_ecc_params->curve_blob.data);
raw_server_ecc_params->curve_blob.size = 2;
/* Read the point */
- GUARD(s2n_stuffer_read_uint8(in, &point_length));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &point_length));
- GUARD(s2n_ecc_evp_read_params_point(in, point_length, &raw_server_ecc_params->point_blob));
+ POSIX_GUARD(s2n_ecc_evp_read_params_point(in, point_length, &raw_server_ecc_params->point_blob));
/* curve type (1) + iana (2) + key share size (1) + key share */
data_to_verify->size = point_length + 4;
@@ -349,10 +364,10 @@ int s2n_ecc_evp_read_params(struct s2n_stuffer *in, struct s2n_blob *data_to_ver
}
int s2n_ecc_evp_write_params_point(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out) {
- notnull_check(ecc_evp_params);
- notnull_check(ecc_evp_params->negotiated_curve);
- notnull_check(ecc_evp_params->evp_pkey);
- notnull_check(out);
+ POSIX_ENSURE_REF(ecc_evp_params);
+ POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve);
+ POSIX_ENSURE_REF(ecc_evp_params->evp_pkey);
+ POSIX_ENSURE_REF(out);
#if EVP_APIS_SUPPORTED
struct s2n_blob point_blob = {0};
@@ -361,12 +376,12 @@ int s2n_ecc_evp_write_params_point(struct s2n_ecc_evp_params *ecc_evp_params, st
size_t size = EVP_PKEY_get1_tls_encodedpoint(ecc_evp_params->evp_pkey, &encoded_point);
if (size != ecc_evp_params->negotiated_curve->share_size) {
OPENSSL_free(encoded_point);
- S2N_ERROR(S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_BAIL(S2N_ERR_ECDHE_SERIALIZING);
}
else {
point_blob.data = s2n_stuffer_raw_write(out, ecc_evp_params->negotiated_curve->share_size);
- notnull_check(point_blob.data);
- memcpy_check(point_blob.data, encoded_point, size);
+ POSIX_ENSURE_REF(point_blob.data);
+ POSIX_CHECKED_MEMCPY(point_blob.data, encoded_point, size);
OPENSSL_free(encoded_point);
}
#else
@@ -379,35 +394,35 @@ int s2n_ecc_evp_write_params_point(struct s2n_ecc_evp_params *ecc_evp_params, st
const EC_GROUP *group = EC_KEY_get0_group(ec_key);
S2N_ERROR_IF(point == NULL || group == NULL, S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
- GUARD(s2n_ecc_evp_calculate_point_length(point, group, &point_len));
+ POSIX_GUARD(s2n_ecc_evp_calculate_point_length(point, group, &point_len));
S2N_ERROR_IF(point_len != ecc_evp_params->negotiated_curve->share_size, S2N_ERR_ECDHE_SERIALIZING);
point_blob.data = s2n_stuffer_raw_write(out, point_len);
- notnull_check(point_blob.data);
+ POSIX_ENSURE_REF(point_blob.data);
point_blob.size = point_len;
- GUARD(s2n_ecc_evp_write_point_data_snug(point, group, &point_blob));
+ POSIX_GUARD(s2n_ecc_evp_write_point_data_snug(point, group, &point_blob));
#endif
return 0;
}
int s2n_ecc_evp_write_params(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out,
struct s2n_blob *written) {
- notnull_check(ecc_evp_params);
- notnull_check(ecc_evp_params->negotiated_curve);
- notnull_check(ecc_evp_params->evp_pkey);
- notnull_check(out);
- notnull_check(written);
+ POSIX_ENSURE_REF(ecc_evp_params);
+ POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve);
+ POSIX_ENSURE_REF(ecc_evp_params->evp_pkey);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(written);
uint8_t key_share_size = ecc_evp_params->negotiated_curve->share_size;
/* Remember where the written data starts */
written->data = s2n_stuffer_raw_write(out, 0);
- notnull_check(written->data);
+ POSIX_ENSURE_REF(written->data);
- GUARD(s2n_stuffer_write_uint8(out, TLS_EC_CURVE_TYPE_NAMED));
- GUARD(s2n_stuffer_write_uint16(out, ecc_evp_params->negotiated_curve->iana_id));
- GUARD(s2n_stuffer_write_uint8(out, key_share_size));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, TLS_EC_CURVE_TYPE_NAMED));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, ecc_evp_params->negotiated_curve->iana_id));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, key_share_size));
- GUARD(s2n_ecc_evp_write_params_point(ecc_evp_params, out));
+ POSIX_GUARD(s2n_ecc_evp_write_params_point(ecc_evp_params, out));
/* key share + key share size (1) + iana (2) + curve type (1) */
written->size = key_share_size + 4;
@@ -416,8 +431,8 @@ int s2n_ecc_evp_write_params(struct s2n_ecc_evp_params *ecc_evp_params, struct s
}
int s2n_ecc_evp_parse_params_point(struct s2n_blob *point_blob, struct s2n_ecc_evp_params *ecc_evp_params) {
- notnull_check(point_blob->data);
- notnull_check(ecc_evp_params->negotiated_curve);
+ POSIX_ENSURE_REF(point_blob->data);
+ POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve);
S2N_ERROR_IF(point_blob->size != ecc_evp_params->negotiated_curve->share_size, S2N_ERR_ECDHE_SERIALIZING);
#if EVP_APIS_SUPPORTED
@@ -426,16 +441,16 @@ int s2n_ecc_evp_parse_params_point(struct s2n_blob *point_blob, struct s2n_ecc_e
ecc_evp_params->evp_pkey = EVP_PKEY_new();
}
S2N_ERROR_IF(ecc_evp_params->evp_pkey == NULL, S2N_ERR_BAD_MESSAGE);
- GUARD(EVP_PKEY_set_type(ecc_evp_params->evp_pkey, ecc_evp_params->negotiated_curve->libcrypto_nid));
+ POSIX_GUARD(EVP_PKEY_set_type(ecc_evp_params->evp_pkey, ecc_evp_params->negotiated_curve->libcrypto_nid));
}
else {
DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL), EVP_PKEY_CTX_free_pointer);
S2N_ERROR_IF(pctx == NULL, S2N_ERR_ECDHE_SERIALIZING);
- GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_SERIALIZING);
- GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, ecc_evp_params->negotiated_curve->libcrypto_nid), S2N_ERR_ECDHE_SERIALIZING);
- GUARD_OSSL(EVP_PKEY_paramgen(pctx, &ecc_evp_params->evp_pkey), S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, ecc_evp_params->negotiated_curve->libcrypto_nid), S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_GUARD_OSSL(EVP_PKEY_paramgen(pctx, &ecc_evp_params->evp_pkey), S2N_ERR_ECDHE_SERIALIZING);
}
- GUARD_OSSL(EVP_PKEY_set1_tls_encodedpoint(ecc_evp_params->evp_pkey, point_blob->data, point_blob->size),
+ POSIX_GUARD_OSSL(EVP_PKEY_set1_tls_encodedpoint(ecc_evp_params->evp_pkey, point_blob->data, point_blob->size),
S2N_ERR_ECDHE_SERIALIZING);
#else
if (ecc_evp_params->evp_pkey == NULL) {
@@ -454,7 +469,7 @@ int s2n_ecc_evp_parse_params_point(struct s2n_blob *point_blob, struct s2n_ecc_e
/* Set the point as the public key */
int success = EC_KEY_set_public_key(ec_key, point);
- GUARD_OSSL(EVP_PKEY_set1_EC_KEY(ecc_evp_params->evp_pkey,ec_key), S2N_ERR_ECDHE_SERIALIZING);
+ POSIX_GUARD_OSSL(EVP_PKEY_set1_EC_KEY(ecc_evp_params->evp_pkey,ec_key), S2N_ERR_ECDHE_SERIALIZING);
/* EC_KEY_set_public_key returns 1 on success, 0 on failure */
S2N_ERROR_IF(success == 0, S2N_ERR_BAD_MESSAGE);
@@ -474,22 +489,22 @@ int s2n_ecc_evp_parse_params(struct s2n_ecdhe_raw_server_params *raw_server_ecc_
int s2n_ecc_evp_find_supported_curve(struct s2n_blob *iana_ids, const struct s2n_ecc_named_curve **found) {
struct s2n_stuffer iana_ids_in = {0};
- GUARD(s2n_stuffer_init(&iana_ids_in, iana_ids));
- GUARD(s2n_stuffer_write(&iana_ids_in, iana_ids));
- for (int i = 0; i < s2n_all_supported_curves_list_len; i++) {
+ POSIX_GUARD(s2n_stuffer_init(&iana_ids_in, iana_ids));
+ POSIX_GUARD(s2n_stuffer_write(&iana_ids_in, iana_ids));
+ for (size_t i = 0; i < s2n_all_supported_curves_list_len; i++) {
const struct s2n_ecc_named_curve *supported_curve = s2n_all_supported_curves_list[i];
- for (int j = 0; j < iana_ids->size / 2; j++) {
+ for (uint32_t j = 0; j < iana_ids->size / 2; j++) {
uint16_t iana_id;
- GUARD(s2n_stuffer_read_uint16(&iana_ids_in, &iana_id));
+ POSIX_GUARD(s2n_stuffer_read_uint16(&iana_ids_in, &iana_id));
if (supported_curve->iana_id == iana_id) {
*found = supported_curve;
return 0;
}
}
- GUARD(s2n_stuffer_reread(&iana_ids_in));
+ POSIX_GUARD(s2n_stuffer_reread(&iana_ids_in));
}
- S2N_ERROR(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ POSIX_BAIL(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
}
int s2n_ecc_evp_params_free(struct s2n_ecc_evp_params *ecc_evp_params) {
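
Two things change in s2n_ecc_evp.c beyond the macro rename: the peer public key validation with EC_KEY_check_key() now runs for every curve except x25519/x448 (per RFC 8446 section 4.2.8.2 and RFC 8422 section 5.11), and a fake s2n_unsupported_curve is exported for triggering retries in tests. The shared secret itself still comes from the usual two-call EVP_PKEY_derive pattern: derive once with a NULL buffer to learn the length, allocate, then derive for real. A condensed standalone version of that pattern against plain OpenSSL (sketch only, without s2n's blob and error handling):

    #include <openssl/crypto.h>
    #include <openssl/evp.h>

    /* Returns an OPENSSL_malloc'd shared secret of *secret_len bytes, or NULL on failure. */
    static unsigned char *derive_shared_secret(EVP_PKEY *own_key, EVP_PKEY *peer_key, size_t *secret_len)
    {
        unsigned char *secret = NULL;
        EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(own_key, NULL);
        if (ctx == NULL) {
            return NULL;
        }
        /* First derive call with a NULL buffer only reports the required length. */
        if (EVP_PKEY_derive_init(ctx) == 1
                && EVP_PKEY_derive_set_peer(ctx, peer_key) == 1
                && EVP_PKEY_derive(ctx, NULL, secret_len) == 1) {
            secret = OPENSSL_malloc(*secret_len);
            if (secret != NULL && EVP_PKEY_derive(ctx, secret, secret_len) != 1) {
                OPENSSL_free(secret);
                secret = NULL;
            }
        }
        EVP_PKEY_CTX_free(ctx);
        return secret;
    }
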
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.c b/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.c
index 240f0f87b1..23050ffea4 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.c
@@ -27,48 +27,65 @@
#include "crypto/s2n_ecdsa.h"
#include "crypto/s2n_ecc_evp.h"
+#include "crypto/s2n_evp_signing.h"
#include "crypto/s2n_hash.h"
#include "crypto/s2n_openssl.h"
#include "crypto/s2n_pkey.h"
+#define S2N_ECDSA_TYPE 0
+
S2N_RESULT s2n_ecdsa_der_signature_size(const struct s2n_pkey *pkey, uint32_t *size_out)
{
- ENSURE_REF(pkey);
- ENSURE_REF(size_out);
+ RESULT_ENSURE_REF(pkey);
+ RESULT_ENSURE_REF(size_out);
const struct s2n_ecdsa_key *ecdsa_key = &pkey->key.ecdsa_key;
- ENSURE_REF(ecdsa_key->ec_key);
+ RESULT_ENSURE_REF(ecdsa_key->ec_key);
const int size = ECDSA_size(ecdsa_key->ec_key);
- GUARD_AS_RESULT(size);
+ RESULT_GUARD_POSIX(size);
*size_out = size;
return S2N_RESULT_OK;
}
+int s2n_ecdsa_sign_digest(const struct s2n_pkey *priv, struct s2n_blob *digest, struct s2n_blob *signature)
+{
+ POSIX_ENSURE_REF(priv);
+ POSIX_ENSURE_REF(digest);
+ POSIX_ENSURE_REF(signature);
+
+ const s2n_ecdsa_private_key *key = &priv->key.ecdsa_key;
+ POSIX_ENSURE_REF(key->ec_key);
+
+ unsigned int signature_size = signature->size;
+ POSIX_GUARD_OSSL(ECDSA_sign(S2N_ECDSA_TYPE, digest->data, digest->size, signature->data, &signature_size, key->ec_key), S2N_ERR_SIGN);
+ POSIX_ENSURE(signature_size <= signature->size, S2N_ERR_SIZE_MISMATCH);
+ signature->size = signature_size;
+
+ return S2N_SUCCESS;
+}
+
static int s2n_ecdsa_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg,
struct s2n_hash_state *digest, struct s2n_blob *signature)
{
+ POSIX_ENSURE_REF(digest);
sig_alg_check(sig_alg, S2N_SIGNATURE_ECDSA);
- const s2n_ecdsa_private_key *key = &priv->key.ecdsa_key;
- notnull_check(key->ec_key);
-
- uint8_t digest_length;
- GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
- lte_check(digest_length, S2N_MAX_DIGEST_LEN);
+ uint8_t digest_length = 0;
+ POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
+ POSIX_ENSURE_LTE(digest_length, S2N_MAX_DIGEST_LEN);
- uint8_t digest_out[S2N_MAX_DIGEST_LEN];
- GUARD(s2n_hash_digest(digest, digest_out, digest_length));
+ uint8_t digest_out[S2N_MAX_DIGEST_LEN] = { 0 };
+ POSIX_GUARD(s2n_hash_digest(digest, digest_out, digest_length));
- unsigned int signature_size = signature->size;
- GUARD_OSSL(ECDSA_sign(0, digest_out, digest_length, signature->data, &signature_size, key->ec_key), S2N_ERR_SIGN);
- S2N_ERROR_IF(signature_size > signature->size, S2N_ERR_SIZE_MISMATCH);
- signature->size = signature_size;
+ struct s2n_blob digest_blob = { 0 };
+ POSIX_GUARD(s2n_blob_init(&digest_blob, digest_out, digest_length));
+ POSIX_GUARD(s2n_ecdsa_sign_digest(priv, &digest_blob, signature));
- GUARD(s2n_hash_reset(digest));
+ POSIX_GUARD(s2n_hash_reset(digest));
- return 0;
+ return S2N_SUCCESS;
}
static int s2n_ecdsa_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg,
@@ -77,19 +94,19 @@ static int s2n_ecdsa_verify(const struct s2n_pkey *pub, s2n_signature_algorithm
sig_alg_check(sig_alg, S2N_SIGNATURE_ECDSA);
const s2n_ecdsa_public_key *key = &pub->key.ecdsa_key;
- notnull_check(key->ec_key);
+ POSIX_ENSURE_REF(key->ec_key);
uint8_t digest_length;
- GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
- lte_check(digest_length, S2N_MAX_DIGEST_LEN);
+ POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
+ POSIX_ENSURE_LTE(digest_length, S2N_MAX_DIGEST_LEN);
uint8_t digest_out[S2N_MAX_DIGEST_LEN];
- GUARD(s2n_hash_digest(digest, digest_out, digest_length));
+ POSIX_GUARD(s2n_hash_digest(digest, digest_out, digest_length));
/* ECDSA_verify ignores the first parameter */
- GUARD_OSSL(ECDSA_verify(0, digest_out, digest_length, signature->data, signature->size, key->ec_key), S2N_ERR_VERIFY_SIGNATURE);
+ POSIX_GUARD_OSSL(ECDSA_verify(0, digest_out, digest_length, signature->data, signature->size, key->ec_key), S2N_ERR_VERIFY_SIGNATURE);
- GUARD(s2n_hash_reset(digest));
+ POSIX_GUARD(s2n_hash_reset(digest));
return 0;
}
@@ -102,20 +119,20 @@ static int s2n_ecdsa_keys_match(const struct s2n_pkey *pub, const struct s2n_pke
DEFER_CLEANUP(struct s2n_hash_state state_out = { 0 }, s2n_hash_free);
/* s2n_hash_new only allocates memory when using high-level EVP hashes, currently restricted to FIPS mode. */
- GUARD(s2n_hash_new(&state_in));
- GUARD(s2n_hash_new(&state_out));
+ POSIX_GUARD(s2n_hash_new(&state_in));
+ POSIX_GUARD(s2n_hash_new(&state_out));
- GUARD(s2n_hash_init(&state_in, S2N_HASH_SHA1));
- GUARD(s2n_hash_init(&state_out, S2N_HASH_SHA1));
- GUARD(s2n_hash_update(&state_in, input, sizeof(input)));
- GUARD(s2n_hash_update(&state_out, input, sizeof(input)));
+ POSIX_GUARD(s2n_hash_init(&state_in, S2N_HASH_SHA1));
+ POSIX_GUARD(s2n_hash_init(&state_out, S2N_HASH_SHA1));
+ POSIX_GUARD(s2n_hash_update(&state_in, input, sizeof(input)));
+ POSIX_GUARD(s2n_hash_update(&state_out, input, sizeof(input)));
uint32_t size = 0;
- GUARD_AS_POSIX(s2n_ecdsa_der_signature_size(priv, &size));
- GUARD(s2n_alloc(&signature, size));
+ POSIX_GUARD_RESULT(s2n_ecdsa_der_signature_size(priv, &size));
+ POSIX_GUARD(s2n_alloc(&signature, size));
- GUARD(s2n_ecdsa_sign(priv, S2N_SIGNATURE_ECDSA, &state_in, &signature));
- GUARD(s2n_ecdsa_verify(pub, S2N_SIGNATURE_ECDSA, &state_out, &signature));
+ POSIX_GUARD(s2n_ecdsa_sign(priv, S2N_SIGNATURE_ECDSA, &state_in, &signature));
+ POSIX_GUARD(s2n_ecdsa_verify(pub, S2N_SIGNATURE_ECDSA, &state_out, &signature));
return 0;
}
@@ -136,7 +153,7 @@ static int s2n_ecdsa_key_free(struct s2n_pkey *pkey)
static int s2n_ecdsa_check_key_exists(const struct s2n_pkey *pkey)
{
const struct s2n_ecdsa_key *ecdsa_key = &pkey->key.ecdsa_key;
- notnull_check(ecdsa_key->ec_key);
+ POSIX_ENSURE_REF(ecdsa_key->ec_key);
return 0;
}
@@ -167,17 +184,18 @@ int s2n_ecdsa_pkey_init(struct s2n_pkey *pkey) {
pkey->match = &s2n_ecdsa_keys_match;
pkey->free = &s2n_ecdsa_key_free;
pkey->check_key = &s2n_ecdsa_check_key_exists;
+ POSIX_GUARD_RESULT(s2n_evp_signing_set_pkey_overrides(pkey));
return 0;
}
int s2n_ecdsa_pkey_matches_curve(const struct s2n_ecdsa_key *ecdsa_key, const struct s2n_ecc_named_curve *curve)
{
- notnull_check(ecdsa_key);
- notnull_check(ecdsa_key->ec_key);
- notnull_check(curve);
+ POSIX_ENSURE_REF(ecdsa_key);
+ POSIX_ENSURE_REF(ecdsa_key->ec_key);
+ POSIX_ENSURE_REF(curve);
int curve_id = EC_GROUP_get_curve_name(EC_KEY_get0_group(ecdsa_key->ec_key));
- eq_check(curve_id, curve->libcrypto_nid);
+ POSIX_ENSURE_EQ(curve_id, curve->libcrypto_nid);
return 0;
}
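
The ECDSA rework introduces s2n_ecdsa_sign_digest(), which signs a caller-supplied digest blob, while s2n_ecdsa_sign() now just hashes into a local blob and delegates to it; s2n_ecdsa_pkey_init() additionally registers the EVP signing overrides added later in this change. A hypothetical caller that already holds a digest (names other than the patched functions are illustrative):

    static int example_sign_precomputed_digest(const struct s2n_pkey *priv,
            uint8_t *digest_bytes, uint32_t digest_len, struct s2n_blob *signature_out)
    {
        struct s2n_blob digest = { 0 };
        POSIX_GUARD(s2n_blob_init(&digest, digest_bytes, digest_len));

        uint32_t sig_size = 0;
        POSIX_GUARD_RESULT(s2n_ecdsa_der_signature_size(priv, &sig_size));
        POSIX_GUARD(s2n_alloc(signature_out, sig_size));

        /* s2n_ecdsa_sign_digest shrinks signature_out->size to the actual DER length. */
        POSIX_GUARD(s2n_ecdsa_sign_digest(priv, &digest, signature_out));
        return S2N_SUCCESS;
    }
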
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.h b/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.h
index 6911cf4387..e4ad664349 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_ecdsa.h
@@ -17,7 +17,7 @@
#include <openssl/ecdsa.h>
#include <stdint.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "stuffer/s2n_stuffer.h"
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_evp.c b/contrib/restricted/aws/s2n/crypto/s2n_evp.c
index 11aac21f75..81fecdce5b 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_evp.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_evp.c
@@ -20,7 +20,7 @@
int s2n_digest_allow_md5_for_fips(struct s2n_evp_digest *evp_digest)
{
- notnull_check(evp_digest);
+ POSIX_ENSURE_REF(evp_digest);
/* This is only to be used for EVP digests that will require MD5 to be used
* to comply with the TLS 1.0 and 1.1 RFC's for the PRF. MD5 cannot be used
* outside of the TLS 1.0 and 1.1 PRF when in FIPS mode.
@@ -35,13 +35,18 @@ int s2n_digest_allow_md5_for_fips(struct s2n_evp_digest *evp_digest)
S2N_RESULT s2n_digest_is_md5_allowed_for_fips(struct s2n_evp_digest *evp_digest, bool *out)
{
- ENSURE_REF(out);
+ RESULT_ENSURE_REF(out);
*out = false;
#if !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_IS_AWSLC)
- if (evp_digest && evp_digest->ctx && s2n_is_in_fips_mode() && EVP_MD_CTX_test_flags(evp_digest->ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW)) {
+ if (s2n_is_in_fips_mode() && evp_digest && evp_digest->ctx && EVP_MD_CTX_test_flags(evp_digest->ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW)) {
/* s2n is in FIPS mode and the EVP digest allows MD5. */
*out = true;
}
+#else
+ if (s2n_is_in_fips_mode()) {
+ /* If s2n is in FIPS mode and built with AWS-LC or BoringSSL, there are no flags to check in the EVP digest to allow MD5. */
+ *out = true;
+ }
#endif
return S2N_RESULT_OK;
}
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_evp.h b/contrib/restricted/aws/s2n/crypto/s2n_evp.h
index 92d30bccc8..6c443efba7 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_evp.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_evp.h
@@ -16,6 +16,7 @@
#pragma once
#include <openssl/evp.h>
+#include <openssl/hmac.h>
#include "crypto/s2n_openssl.h"
#include "utils/s2n_result.h"
@@ -27,7 +28,10 @@ struct s2n_evp_digest {
struct s2n_evp_hmac_state {
struct s2n_evp_digest evp_digest;
- EVP_PKEY *mac_key;
+ union {
+ HMAC_CTX *hmac_ctx;
+ EVP_PKEY *evp_pkey;
+ } ctx;
};
/* Define API's that change based on the OpenSSL Major Version. */
@@ -41,5 +45,12 @@ struct s2n_evp_hmac_state {
#define S2N_EVP_MD_CTX_FREE(md_ctx) (EVP_MD_CTX_destroy(md_ctx))
#endif
+/* On some versions of OpenSSL, "EVP_PKEY_CTX_set_signature_md()" is just a macro that casts digest_alg to "void*",
+ * which fails to compile when the "-Werror=cast-qual" compiler flag is enabled. So we work around this OpenSSL
+ * issue by turning off this compiler check for this one function with a cast through.
+ */
+#define S2N_EVP_PKEY_CTX_set_signature_md(ctx, md) \
+ EVP_PKEY_CTX_set_signature_md(ctx, (EVP_MD*) (uintptr_t) md)
+
extern int s2n_digest_allow_md5_for_fips(struct s2n_evp_digest *evp_digest);
extern S2N_RESULT s2n_digest_is_md5_allowed_for_fips(struct s2n_evp_digest *evp_digest, bool *out);
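
The union in s2n_evp_hmac_state lets the HMAC code choose between the HMAC_CTX and EVP_PKEY based APIs, and the S2N_EVP_PKEY_CTX_set_signature_md() wrapper exists because some OpenSSL versions implement EVP_PKEY_CTX_set_signature_md() as a macro that pushes the const EVP_MD pointer through a bare void * cast, which -Werror=cast-qual rejects. A minimal illustration of the qualifier problem and the uintptr_t detour (hypothetical demo code):

    #include <stdint.h>
    #include <openssl/evp.h>

    void cast_qual_demo(void)
    {
        const EVP_MD *md = EVP_sha256();

        /* Casting away const directly trips -Werror=cast-qual:
         *     void *bad = (void *) md;
         */

        /* Routing the cast through an integer type, as the new macro does, is accepted. */
        void *ok = (void *) (uintptr_t) md;
        (void) ok;
    }
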
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_evp_signing.c b/contrib/restricted/aws/s2n/crypto/s2n_evp_signing.c
new file mode 100644
index 0000000000..147840c2dd
--- /dev/null
+++ b/contrib/restricted/aws/s2n/crypto/s2n_evp_signing.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "error/s2n_errno.h"
+
+#include "crypto/s2n_evp.h"
+#include "crypto/s2n_evp_signing.h"
+#include "crypto/s2n_pkey.h"
+#include "crypto/s2n_rsa_pss.h"
+
+#include "utils/s2n_safety.h"
+
+/*
+ * FIPS 140-3 requires that we don't pass raw digest bytes to the libcrypto signing methods.
+ * In order to do that, we need to use signing methods that both calculate the digest and
+ * perform the signature.
+ */
+
+static S2N_RESULT s2n_evp_md_ctx_set_pkey_ctx(EVP_MD_CTX *ctx, EVP_PKEY_CTX *pctx)
+{
+#ifdef S2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ EVP_MD_CTX_set_pkey_ctx(ctx, pctx);
+ return S2N_RESULT_OK;
+#else
+ RESULT_BAIL(S2N_ERR_UNIMPLEMENTED);
+#endif
+}
+
+static S2N_RESULT s2n_evp_pkey_set_rsa_pss_saltlen(EVP_PKEY_CTX *pctx)
+{
+#if RSA_PSS_SIGNING_SUPPORTED
+ RESULT_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, RSA_PSS_SALTLEN_DIGEST), S2N_ERR_PKEY_CTX_INIT);
+ return S2N_RESULT_OK;
+#else
+ RESULT_BAIL(S2N_ERR_UNIMPLEMENTED);
+#endif
+}
+
+bool s2n_evp_signing_supported()
+{
+#ifdef S2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ /* We can only use EVP signing if the hash state has an EVP_MD_CTX
+ * that we can pass to the EVP signing methods.
+ */
+ return s2n_hash_evp_fully_supported();
+#else
+ return false;
+#endif
+}
+
+/* If using EVP signing, override the sign and verify pkey methods.
+ * The EVP methods can handle all pkey types / signature algorithms.
+ */
+S2N_RESULT s2n_evp_signing_set_pkey_overrides(struct s2n_pkey *pkey)
+{
+ if (s2n_evp_signing_supported()) {
+ RESULT_ENSURE_REF(pkey);
+ pkey->sign = &s2n_evp_sign;
+ pkey->verify = &s2n_evp_verify;
+ }
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_evp_signing_validate_hash_alg(s2n_signature_algorithm sig_alg, s2n_hash_algorithm hash_alg)
+{
+ switch(hash_alg) {
+ case S2N_HASH_NONE:
+ case S2N_HASH_MD5:
+ /* MD5 alone is never supported */
+ RESULT_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
+ break;
+ case S2N_HASH_MD5_SHA1:
+ /* Only RSA supports MD5+SHA1.
+ * This should not be a problem, as we only allow MD5+SHA1 when
+ * falling back to TLS1.0 or 1.1, which only support RSA.
+ */
+ RESULT_ENSURE(sig_alg == S2N_SIGNATURE_RSA, S2N_ERR_HASH_INVALID_ALGORITHM);
+ break;
+ default:
+ break;
+ }
+ /* Hash algorithm must be recognized and supported by EVP_MD */
+ RESULT_ENSURE(s2n_hash_alg_to_evp_md(hash_alg) != NULL, S2N_ERR_HASH_INVALID_ALGORITHM);
+ return S2N_RESULT_OK;
+}
+
+int s2n_evp_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg,
+ struct s2n_hash_state *hash_state, struct s2n_blob *signature)
+{
+ POSIX_ENSURE_REF(priv);
+ POSIX_ENSURE_REF(hash_state);
+ POSIX_ENSURE_REF(signature);
+ POSIX_ENSURE(s2n_evp_signing_supported(), S2N_ERR_HASH_NOT_READY);
+ POSIX_GUARD_RESULT(s2n_evp_signing_validate_hash_alg(sig_alg, hash_state->alg));
+
+ EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(priv->pkey, NULL);
+ POSIX_ENSURE_REF(pctx);
+ POSIX_GUARD_OSSL(EVP_PKEY_sign_init(pctx), S2N_ERR_PKEY_CTX_INIT);
+ POSIX_GUARD_OSSL(S2N_EVP_PKEY_CTX_set_signature_md(pctx, s2n_hash_alg_to_evp_md(hash_state->alg)), S2N_ERR_PKEY_CTX_INIT);
+
+ if (sig_alg == S2N_SIGNATURE_RSA_PSS_RSAE || sig_alg == S2N_SIGNATURE_RSA_PSS_PSS) {
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_PKEY_CTX_INIT);
+ POSIX_GUARD_RESULT(s2n_evp_pkey_set_rsa_pss_saltlen(pctx));
+ }
+
+ EVP_MD_CTX *ctx = hash_state->digest.high_level.evp.ctx;
+ POSIX_ENSURE_REF(ctx);
+ POSIX_GUARD_RESULT(s2n_evp_md_ctx_set_pkey_ctx(ctx, pctx));
+
+ size_t signature_size = signature->size;
+ POSIX_GUARD_OSSL(EVP_DigestSignFinal(ctx, signature->data, &signature_size), S2N_ERR_SIGN);
+ POSIX_ENSURE(signature_size <= signature->size, S2N_ERR_SIZE_MISMATCH);
+ signature->size = signature_size;
+ return S2N_SUCCESS;
+}
+
+int s2n_evp_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg,
+ struct s2n_hash_state *hash_state, struct s2n_blob *signature)
+{
+ POSIX_ENSURE_REF(pub);
+ POSIX_ENSURE_REF(hash_state);
+ POSIX_ENSURE_REF(signature);
+ POSIX_ENSURE(s2n_evp_signing_supported(), S2N_ERR_HASH_NOT_READY);
+ POSIX_GUARD_RESULT(s2n_evp_signing_validate_hash_alg(sig_alg, hash_state->alg));
+
+ EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(pub->pkey, NULL);
+ POSIX_ENSURE_REF(pctx);
+ POSIX_GUARD_OSSL(EVP_PKEY_verify_init(pctx), S2N_ERR_PKEY_CTX_INIT);
+ POSIX_GUARD_OSSL(S2N_EVP_PKEY_CTX_set_signature_md(pctx, s2n_hash_alg_to_evp_md(hash_state->alg)), S2N_ERR_PKEY_CTX_INIT);
+
+ if (sig_alg == S2N_SIGNATURE_RSA_PSS_RSAE || sig_alg == S2N_SIGNATURE_RSA_PSS_PSS) {
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_PKEY_CTX_INIT);
+ POSIX_GUARD_RESULT(s2n_evp_pkey_set_rsa_pss_saltlen(pctx));
+ }
+
+ EVP_MD_CTX *ctx = hash_state->digest.high_level.evp.ctx;
+ POSIX_ENSURE_REF(ctx);
+ POSIX_GUARD_RESULT(s2n_evp_md_ctx_set_pkey_ctx(ctx, pctx));
+
+ POSIX_GUARD_OSSL(EVP_DigestVerifyFinal(ctx, signature->data, signature->size), S2N_ERR_VERIFY_SIGNATURE);
+ return S2N_SUCCESS;
+}
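As a usage-level illustration of the FIPS 140-3 note at the top of this file, here is a minimal sketch (not part of the diff; the function name and error handling are simplified assumptions) of the standard one-shot EVP pattern, where libcrypto both hashes the message and signs it so no raw digest bytes cross the application boundary:

    #include <stddef.h>
    #include <openssl/evp.h>

    int sign_sketch(EVP_PKEY *pkey, const unsigned char *msg, size_t msg_len,
                    unsigned char *sig, size_t *sig_len)
    {
        /* *sig_len must hold the capacity of sig on entry (OpenSSL 1.1.0+ API) */
        EVP_MD_CTX *ctx = EVP_MD_CTX_new();
        if (ctx == NULL) {
            return -1;
        }
        /* the digest and the signature are produced by the same EVP context */
        if (EVP_DigestSignInit(ctx, NULL, EVP_sha256(), NULL, pkey) != 1
                || EVP_DigestSignUpdate(ctx, msg, msg_len) != 1
                || EVP_DigestSignFinal(ctx, sig, sig_len) != 1) {
            EVP_MD_CTX_free(ctx);
            return -1;
        }
        EVP_MD_CTX_free(ctx);
        return 0;
    }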
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_evp_signing.h b/contrib/restricted/aws/s2n/crypto/s2n_evp_signing.h
new file mode 100644
index 0000000000..872f63b682
--- /dev/null
+++ b/contrib/restricted/aws/s2n/crypto/s2n_evp_signing.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "api/s2n.h"
+
+#include "crypto/s2n_hash.h"
+#include "crypto/s2n_signature.h"
+#include "utils/s2n_blob.h"
+
+bool s2n_evp_signing_supported();
+S2N_RESULT s2n_evp_signing_set_pkey_overrides(struct s2n_pkey *pkey);
+int s2n_evp_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg,
+ struct s2n_hash_state *digest, struct s2n_blob *signature);
+int s2n_evp_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg,
+ struct s2n_hash_state *digest, struct s2n_blob *signature);
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_fips.c b/contrib/restricted/aws/s2n/crypto/s2n_fips.c
index d939cc3b53..8843f4ebd9 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_fips.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_fips.c
@@ -23,13 +23,19 @@ int s2n_fips_init(void)
{
s2n_fips_mode = 0;
-#ifdef OPENSSL_FIPS
- /* FIPS mode can be entered only if OPENSSL_FIPS is defined */
+ /* FIPS mode can be checked if OpenSSL was configured and built for FIPS, which then defines OPENSSL_FIPS.
+ *
+ * AWS-LC always defines FIPS_mode(), which can be called to check what the library was built with. It does not
+ * define a public OPENSSL_FIPS/AWSLC_FIPS macro that we can (or need to) check here.
+ *
+ * Note: FIPS_mode() does not change the FIPS state of libcrypto; it only returns the current state. Applications
+ * using s2n must call FIPS_mode_set(1) prior to s2n_init.
+ */
+#if defined(OPENSSL_FIPS) || defined(OPENSSL_IS_AWSLC)
if (FIPS_mode()) {
s2n_fips_mode = 1;
}
#endif
-
return 0;
}
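The call ordering described in the new comment can be sketched as follows (not part of the diff; it assumes a FIPS-capable libcrypto where FIPS_mode_set() is available, and the wrapper name is hypothetical):

    #include <openssl/crypto.h>
    #include <s2n.h>

    int init_in_fips_mode(void)
    {
        if (FIPS_mode_set(1) != 1) { /* enter FIPS mode before s2n snapshots the state */
            return -1;
        }
        return s2n_init();           /* s2n_fips_init() now observes FIPS_mode() != 0 */
    }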
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_hash.c b/contrib/restricted/aws/s2n/crypto/s2n_hash.c
index 27a9358438..b2d817851d 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_hash.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_hash.c
@@ -22,9 +22,52 @@
#include "utils/s2n_safety.h"
+static bool s2n_use_custom_md5_sha1()
+{
+#if defined(S2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH)
+ return false;
+#else
+ return true;
+#endif
+}
+
+static bool s2n_use_evp_impl()
+{
+ return s2n_is_in_fips_mode();
+}
+
+bool s2n_hash_evp_fully_supported()
+{
+ return s2n_use_evp_impl() && !s2n_use_custom_md5_sha1();
+}
+
+const EVP_MD* s2n_hash_alg_to_evp_md(s2n_hash_algorithm alg)
+{
+ switch (alg) {
+ case S2N_HASH_MD5:
+ return EVP_md5();
+ case S2N_HASH_SHA1:
+ return EVP_sha1();
+ case S2N_HASH_SHA224:
+ return EVP_sha224();
+ case S2N_HASH_SHA256:
+ return EVP_sha256();
+ case S2N_HASH_SHA384:
+ return EVP_sha384();
+ case S2N_HASH_SHA512:
+ return EVP_sha512();
+#if defined(S2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH)
+ case S2N_HASH_MD5_SHA1:
+ return EVP_md5_sha1();
+#endif
+ default:
+ return NULL;
+ }
+}
+
int s2n_hash_digest_size(s2n_hash_algorithm alg, uint8_t *out)
{
- ENSURE_POSIX(S2N_MEM_IS_WRITABLE(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
switch (alg) {
case S2N_HASH_NONE: *out = 0; break;
case S2N_HASH_MD5: *out = MD5_DIGEST_LENGTH; break;
@@ -35,7 +78,7 @@ int s2n_hash_digest_size(s2n_hash_algorithm alg, uint8_t *out)
case S2N_HASH_SHA512: *out = SHA512_DIGEST_LENGTH; break;
case S2N_HASH_MD5_SHA1: *out = MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH; break;
default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
}
return S2N_SUCCESS;
}
@@ -45,7 +88,7 @@ int s2n_hash_digest_size(s2n_hash_algorithm alg, uint8_t *out)
 * If this ever becomes untrue, this would require fixing. */
int s2n_hash_block_size(s2n_hash_algorithm alg, uint64_t *block_size)
{
- ENSURE_POSIX(S2N_MEM_IS_WRITABLE(block_size, sizeof(*block_size)), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(block_size, sizeof(*block_size)), S2N_ERR_PRECONDITION_VIOLATION);
switch(alg) {
case S2N_HASH_NONE: *block_size = 64; break;
case S2N_HASH_MD5: *block_size = 64; break;
@@ -56,7 +99,7 @@ int s2n_hash_block_size(s2n_hash_algorithm alg, uint64_t *block_size)
case S2N_HASH_SHA512: *block_size = 128; break;
case S2N_HASH_MD5_SHA1: *block_size = 64; break;
default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
}
return S2N_SUCCESS;
}
@@ -84,7 +127,7 @@ bool s2n_hash_is_available(s2n_hash_algorithm alg)
int s2n_hash_is_ready_for_input(struct s2n_hash_state *state)
{
- PRECONDITION_POSIX(s2n_hash_state_validate(state));
+ POSIX_PRECONDITION(s2n_hash_state_validate(state));
return state->is_ready_for_input;
}
@@ -94,8 +137,7 @@ static int s2n_low_level_hash_new(struct s2n_hash_state *state)
* being used. For the s2n_low_level_hash implementation, new is a no-op.
*/
- state->is_ready_for_input = 0;
- state->currently_in_hash = 0;
+ *state = (struct s2n_hash_state) { 0 };
return S2N_SUCCESS;
}
@@ -105,30 +147,30 @@ static int s2n_low_level_hash_init(struct s2n_hash_state *state, s2n_hash_algori
case S2N_HASH_NONE:
break;
case S2N_HASH_MD5:
- GUARD_OSSL(MD5_Init(&state->digest.low_level.md5), S2N_ERR_HASH_INIT_FAILED);
+ POSIX_GUARD_OSSL(MD5_Init(&state->digest.low_level.md5), S2N_ERR_HASH_INIT_FAILED);
break;
case S2N_HASH_SHA1:
- GUARD_OSSL(SHA1_Init(&state->digest.low_level.sha1), S2N_ERR_HASH_INIT_FAILED);
+ POSIX_GUARD_OSSL(SHA1_Init(&state->digest.low_level.sha1), S2N_ERR_HASH_INIT_FAILED);
break;
case S2N_HASH_SHA224:
- GUARD_OSSL(SHA224_Init(&state->digest.low_level.sha224), S2N_ERR_HASH_INIT_FAILED);
+ POSIX_GUARD_OSSL(SHA224_Init(&state->digest.low_level.sha224), S2N_ERR_HASH_INIT_FAILED);
break;
case S2N_HASH_SHA256:
- GUARD_OSSL(SHA256_Init(&state->digest.low_level.sha256), S2N_ERR_HASH_INIT_FAILED);
+ POSIX_GUARD_OSSL(SHA256_Init(&state->digest.low_level.sha256), S2N_ERR_HASH_INIT_FAILED);
break;
case S2N_HASH_SHA384:
- GUARD_OSSL(SHA384_Init(&state->digest.low_level.sha384), S2N_ERR_HASH_INIT_FAILED);
+ POSIX_GUARD_OSSL(SHA384_Init(&state->digest.low_level.sha384), S2N_ERR_HASH_INIT_FAILED);
break;
case S2N_HASH_SHA512:
- GUARD_OSSL(SHA512_Init(&state->digest.low_level.sha512), S2N_ERR_HASH_INIT_FAILED);
+ POSIX_GUARD_OSSL(SHA512_Init(&state->digest.low_level.sha512), S2N_ERR_HASH_INIT_FAILED);
break;
case S2N_HASH_MD5_SHA1:
- GUARD_OSSL(SHA1_Init(&state->digest.low_level.md5_sha1.sha1), S2N_ERR_HASH_INIT_FAILED);;
- GUARD_OSSL(MD5_Init(&state->digest.low_level.md5_sha1.md5), S2N_ERR_HASH_INIT_FAILED);;
+ POSIX_GUARD_OSSL(SHA1_Init(&state->digest.low_level.md5_sha1.sha1), S2N_ERR_HASH_INIT_FAILED);;
+ POSIX_GUARD_OSSL(MD5_Init(&state->digest.low_level.md5_sha1.md5), S2N_ERR_HASH_INIT_FAILED);;
break;
default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
}
state->alg = alg;
@@ -140,38 +182,38 @@ static int s2n_low_level_hash_init(struct s2n_hash_state *state, s2n_hash_algori
static int s2n_low_level_hash_update(struct s2n_hash_state *state, const void *data, uint32_t size)
{
- ENSURE_POSIX(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
+ POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
switch (state->alg) {
case S2N_HASH_NONE:
break;
case S2N_HASH_MD5:
- GUARD_OSSL(MD5_Update(&state->digest.low_level.md5, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(MD5_Update(&state->digest.low_level.md5, data, size), S2N_ERR_HASH_UPDATE_FAILED);
break;
case S2N_HASH_SHA1:
- GUARD_OSSL(SHA1_Update(&state->digest.low_level.sha1, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(SHA1_Update(&state->digest.low_level.sha1, data, size), S2N_ERR_HASH_UPDATE_FAILED);
break;
case S2N_HASH_SHA224:
- GUARD_OSSL(SHA224_Update(&state->digest.low_level.sha224, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(SHA224_Update(&state->digest.low_level.sha224, data, size), S2N_ERR_HASH_UPDATE_FAILED);
break;
case S2N_HASH_SHA256:
- GUARD_OSSL(SHA256_Update(&state->digest.low_level.sha256, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(SHA256_Update(&state->digest.low_level.sha256, data, size), S2N_ERR_HASH_UPDATE_FAILED);
break;
case S2N_HASH_SHA384:
- GUARD_OSSL(SHA384_Update(&state->digest.low_level.sha384, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(SHA384_Update(&state->digest.low_level.sha384, data, size), S2N_ERR_HASH_UPDATE_FAILED);
break;
case S2N_HASH_SHA512:
- GUARD_OSSL(SHA512_Update(&state->digest.low_level.sha512, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(SHA512_Update(&state->digest.low_level.sha512, data, size), S2N_ERR_HASH_UPDATE_FAILED);
break;
case S2N_HASH_MD5_SHA1:
- GUARD_OSSL(SHA1_Update(&state->digest.low_level.md5_sha1.sha1, data, size), S2N_ERR_HASH_UPDATE_FAILED);
- GUARD_OSSL(MD5_Update(&state->digest.low_level.md5_sha1.md5, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(SHA1_Update(&state->digest.low_level.md5_sha1.sha1, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(MD5_Update(&state->digest.low_level.md5_sha1.md5, data, size), S2N_ERR_HASH_UPDATE_FAILED);
break;
default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
}
- ENSURE_POSIX(size <= (UINT64_MAX - state->currently_in_hash), S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(size <= (UINT64_MAX - state->currently_in_hash), S2N_ERR_INTEGER_OVERFLOW);
state->currently_in_hash += size;
return S2N_SUCCESS;
@@ -179,42 +221,42 @@ static int s2n_low_level_hash_update(struct s2n_hash_state *state, const void *d
static int s2n_low_level_hash_digest(struct s2n_hash_state *state, void *out, uint32_t size)
{
- ENSURE_POSIX(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
+ POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
switch (state->alg) {
case S2N_HASH_NONE:
break;
case S2N_HASH_MD5:
- eq_check(size, MD5_DIGEST_LENGTH);
- GUARD_OSSL(MD5_Final(out, &state->digest.low_level.md5), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_ENSURE_EQ(size, MD5_DIGEST_LENGTH);
+ POSIX_GUARD_OSSL(MD5_Final(out, &state->digest.low_level.md5), S2N_ERR_HASH_DIGEST_FAILED);
break;
case S2N_HASH_SHA1:
- eq_check(size, SHA_DIGEST_LENGTH);
- GUARD_OSSL(SHA1_Final(out, &state->digest.low_level.sha1), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_ENSURE_EQ(size, SHA_DIGEST_LENGTH);
+ POSIX_GUARD_OSSL(SHA1_Final(out, &state->digest.low_level.sha1), S2N_ERR_HASH_DIGEST_FAILED);
break;
case S2N_HASH_SHA224:
- eq_check(size, SHA224_DIGEST_LENGTH);
- GUARD_OSSL(SHA224_Final(out, &state->digest.low_level.sha224), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_ENSURE_EQ(size, SHA224_DIGEST_LENGTH);
+ POSIX_GUARD_OSSL(SHA224_Final(out, &state->digest.low_level.sha224), S2N_ERR_HASH_DIGEST_FAILED);
break;
case S2N_HASH_SHA256:
- eq_check(size, SHA256_DIGEST_LENGTH);
- GUARD_OSSL(SHA256_Final(out, &state->digest.low_level.sha256), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_ENSURE_EQ(size, SHA256_DIGEST_LENGTH);
+ POSIX_GUARD_OSSL(SHA256_Final(out, &state->digest.low_level.sha256), S2N_ERR_HASH_DIGEST_FAILED);
break;
case S2N_HASH_SHA384:
- eq_check(size, SHA384_DIGEST_LENGTH);
- GUARD_OSSL(SHA384_Final(out, &state->digest.low_level.sha384), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_ENSURE_EQ(size, SHA384_DIGEST_LENGTH);
+ POSIX_GUARD_OSSL(SHA384_Final(out, &state->digest.low_level.sha384), S2N_ERR_HASH_DIGEST_FAILED);
break;
case S2N_HASH_SHA512:
- eq_check(size, SHA512_DIGEST_LENGTH);
- GUARD_OSSL(SHA512_Final(out, &state->digest.low_level.sha512), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_ENSURE_EQ(size, SHA512_DIGEST_LENGTH);
+ POSIX_GUARD_OSSL(SHA512_Final(out, &state->digest.low_level.sha512), S2N_ERR_HASH_DIGEST_FAILED);
break;
case S2N_HASH_MD5_SHA1:
- eq_check(size, MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH);
- GUARD_OSSL(SHA1_Final(((uint8_t *) out) + MD5_DIGEST_LENGTH, &state->digest.low_level.md5_sha1.sha1), S2N_ERR_HASH_DIGEST_FAILED);
- GUARD_OSSL(MD5_Final(out, &state->digest.low_level.md5_sha1.md5), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_ENSURE_EQ(size, MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH);
+ POSIX_GUARD_OSSL(SHA1_Final(((uint8_t *) out) + MD5_DIGEST_LENGTH, &state->digest.low_level.md5_sha1.sha1), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_GUARD_OSSL(MD5_Final(out, &state->digest.low_level.md5_sha1.md5), S2N_ERR_HASH_DIGEST_FAILED);
break;
default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
}
state->currently_in_hash = 0;
@@ -224,7 +266,7 @@ static int s2n_low_level_hash_digest(struct s2n_hash_state *state, void *out, ui
static int s2n_low_level_hash_copy(struct s2n_hash_state *to, struct s2n_hash_state *from)
{
- memcpy_check(to, from, sizeof(struct s2n_hash_state));
+ POSIX_CHECKED_MEMCPY(to, from, sizeof(struct s2n_hash_state));
return 0;
}
@@ -245,8 +287,11 @@ static int s2n_low_level_hash_free(struct s2n_hash_state *state)
static int s2n_evp_hash_new(struct s2n_hash_state *state)
{
- notnull_check(state->digest.high_level.evp.ctx = S2N_EVP_MD_CTX_NEW());
- notnull_check(state->digest.high_level.evp_md5_secondary.ctx = S2N_EVP_MD_CTX_NEW());
+ POSIX_ENSURE_REF(state->digest.high_level.evp.ctx = S2N_EVP_MD_CTX_NEW());
+ if (s2n_use_custom_md5_sha1()) {
+ POSIX_ENSURE_REF(state->digest.high_level.evp_md5_secondary.ctx = S2N_EVP_MD_CTX_NEW());
+ }
+
state->is_ready_for_input = 0;
state->currently_in_hash = 0;
@@ -260,169 +305,122 @@ static int s2n_evp_hash_allow_md5_for_fips(struct s2n_hash_state *state)
* outside of the TLS 1.0 and 1.1 PRF when in FIPS mode. When needed, this must
* be called prior to s2n_hash_init().
*/
- GUARD(s2n_digest_allow_md5_for_fips(&state->digest.high_level.evp_md5_secondary));
- return s2n_digest_allow_md5_for_fips(&state->digest.high_level.evp);
+ POSIX_GUARD(s2n_digest_allow_md5_for_fips(&state->digest.high_level.evp));
+ if (s2n_use_custom_md5_sha1()) {
+ POSIX_GUARD(s2n_digest_allow_md5_for_fips(&state->digest.high_level.evp_md5_secondary));
+ }
+ return S2N_SUCCESS;
}
static int s2n_evp_hash_init(struct s2n_hash_state *state, s2n_hash_algorithm alg)
{
- notnull_check(state->digest.high_level.evp.ctx);
- notnull_check(state->digest.high_level.evp_md5_secondary.ctx);
- switch (alg) {
- case S2N_HASH_NONE:
- break;
- case S2N_HASH_MD5:
- GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_md5(), NULL), S2N_ERR_HASH_INIT_FAILED);
- break;
- case S2N_HASH_SHA1:
- GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_sha1(), NULL), S2N_ERR_HASH_INIT_FAILED);
- break;
- case S2N_HASH_SHA224:
- GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_sha224(), NULL), S2N_ERR_HASH_INIT_FAILED);
- break;
- case S2N_HASH_SHA256:
- GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_sha256(), NULL), S2N_ERR_HASH_INIT_FAILED);
- break;
- case S2N_HASH_SHA384:
- GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_sha384(), NULL), S2N_ERR_HASH_INIT_FAILED);
- break;
- case S2N_HASH_SHA512:
- GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_sha512(), NULL), S2N_ERR_HASH_INIT_FAILED);
- break;
- case S2N_HASH_MD5_SHA1:
- GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_sha1(), NULL), S2N_ERR_HASH_INIT_FAILED);
- GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp_md5_secondary.ctx, EVP_md5(), NULL), S2N_ERR_HASH_INIT_FAILED);
- break;
- default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
- }
+ POSIX_ENSURE_REF(state->digest.high_level.evp.ctx);
state->alg = alg;
state->is_ready_for_input = 1;
state->currently_in_hash = 0;
+ if (alg == S2N_HASH_NONE) {
+ return S2N_SUCCESS;
+ }
+
+ if (alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) {
+ POSIX_ENSURE_REF(state->digest.high_level.evp_md5_secondary.ctx);
+ POSIX_GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_sha1(), NULL), S2N_ERR_HASH_INIT_FAILED);
+ POSIX_GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp_md5_secondary.ctx, EVP_md5(), NULL), S2N_ERR_HASH_INIT_FAILED);
+ return S2N_SUCCESS;
+ }
+
+ POSIX_ENSURE_REF(s2n_hash_alg_to_evp_md(alg));
+ POSIX_GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, s2n_hash_alg_to_evp_md(alg), NULL), S2N_ERR_HASH_INIT_FAILED);
return S2N_SUCCESS;
}
static int s2n_evp_hash_update(struct s2n_hash_state *state, const void *data, uint32_t size)
{
- ENSURE_POSIX(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
+ POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
+ POSIX_ENSURE(size <= (UINT64_MAX - state->currently_in_hash), S2N_ERR_INTEGER_OVERFLOW);
+ state->currently_in_hash += size;
- switch (state->alg) {
- case S2N_HASH_NONE:
- break;
- case S2N_HASH_MD5:
- case S2N_HASH_SHA1:
- case S2N_HASH_SHA224:
- case S2N_HASH_SHA256:
- case S2N_HASH_SHA384:
- case S2N_HASH_SHA512:
- notnull_check(EVP_MD_CTX_md(state->digest.high_level.evp.ctx));
- GUARD_OSSL(EVP_DigestUpdate(state->digest.high_level.evp.ctx, data, size), S2N_ERR_HASH_UPDATE_FAILED);
- break;
- case S2N_HASH_MD5_SHA1:
- notnull_check(EVP_MD_CTX_md(state->digest.high_level.evp.ctx));
- notnull_check(EVP_MD_CTX_md(state->digest.high_level.evp_md5_secondary.ctx));
- GUARD_OSSL(EVP_DigestUpdate(state->digest.high_level.evp.ctx, data, size), S2N_ERR_HASH_UPDATE_FAILED);
- GUARD_OSSL(EVP_DigestUpdate(state->digest.high_level.evp_md5_secondary.ctx, data, size), S2N_ERR_HASH_UPDATE_FAILED);
- break;
- default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ if (state->alg == S2N_HASH_NONE) {
+ return S2N_SUCCESS;
}
- ENSURE_POSIX(size <= (UINT64_MAX - state->currently_in_hash), S2N_ERR_INTEGER_OVERFLOW);
- state->currently_in_hash += size;
+ POSIX_ENSURE_REF(EVP_MD_CTX_md(state->digest.high_level.evp.ctx));
+ POSIX_GUARD_OSSL(EVP_DigestUpdate(state->digest.high_level.evp.ctx, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+
+ if (state->alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) {
+ POSIX_ENSURE_REF(EVP_MD_CTX_md(state->digest.high_level.evp_md5_secondary.ctx));
+ POSIX_GUARD_OSSL(EVP_DigestUpdate(state->digest.high_level.evp_md5_secondary.ctx, data, size), S2N_ERR_HASH_UPDATE_FAILED);
+ }
return S2N_SUCCESS;
}
static int s2n_evp_hash_digest(struct s2n_hash_state *state, void *out, uint32_t size)
{
- ENSURE_POSIX(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
+ POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
+
+ state->currently_in_hash = 0;
+ state->is_ready_for_input = 0;
unsigned int digest_size = size;
- uint8_t expected_digest_size;
- GUARD(s2n_hash_digest_size(state->alg, &expected_digest_size));
- eq_check(digest_size, expected_digest_size);
+ uint8_t expected_digest_size = 0;
+ POSIX_GUARD(s2n_hash_digest_size(state->alg, &expected_digest_size));
+ POSIX_ENSURE_EQ(digest_size, expected_digest_size);
- /* Used for S2N_HASH_MD5_SHA1 case to specify the exact size of each digest. */
- uint8_t sha1_digest_size;
- unsigned int sha1_primary_digest_size;
- unsigned int md5_secondary_digest_size;
+ if (state->alg == S2N_HASH_NONE) {
+ return S2N_SUCCESS;
+ }
- switch (state->alg) {
- case S2N_HASH_NONE:
- break;
- case S2N_HASH_MD5:
- case S2N_HASH_SHA1:
- case S2N_HASH_SHA224:
- case S2N_HASH_SHA256:
- case S2N_HASH_SHA384:
- case S2N_HASH_SHA512:
- notnull_check(EVP_MD_CTX_md(state->digest.high_level.evp.ctx));
- ENSURE_POSIX(EVP_MD_CTX_size(state->digest.high_level.evp.ctx) <= digest_size, S2N_ERR_HASH_DIGEST_FAILED);
- GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp.ctx, out, &digest_size), S2N_ERR_HASH_DIGEST_FAILED);
- break;
- case S2N_HASH_MD5_SHA1:
- notnull_check(EVP_MD_CTX_md(state->digest.high_level.evp.ctx));
- notnull_check(EVP_MD_CTX_md(state->digest.high_level.evp_md5_secondary.ctx));
- GUARD(s2n_hash_digest_size(S2N_HASH_SHA1, &sha1_digest_size));
- sha1_primary_digest_size = sha1_digest_size;
- md5_secondary_digest_size = digest_size - sha1_primary_digest_size;
- ENSURE_POSIX(EVP_MD_CTX_size(state->digest.high_level.evp.ctx) <= sha1_digest_size, S2N_ERR_HASH_DIGEST_FAILED);
- ENSURE_POSIX(EVP_MD_CTX_size(state->digest.high_level.evp_md5_secondary.ctx) <= md5_secondary_digest_size, S2N_ERR_HASH_DIGEST_FAILED);
-
- GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp.ctx, ((uint8_t *) out) + MD5_DIGEST_LENGTH, &sha1_primary_digest_size), S2N_ERR_HASH_DIGEST_FAILED);
- GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp_md5_secondary.ctx, out, &md5_secondary_digest_size), S2N_ERR_HASH_DIGEST_FAILED);
- break;
- default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_ENSURE_REF(EVP_MD_CTX_md(state->digest.high_level.evp.ctx));
+
+ if (state->alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) {
+ POSIX_ENSURE_REF(EVP_MD_CTX_md(state->digest.high_level.evp_md5_secondary.ctx));
+
+ uint8_t sha1_digest_size = 0;
+ POSIX_GUARD(s2n_hash_digest_size(S2N_HASH_SHA1, &sha1_digest_size));
+
+ unsigned int sha1_primary_digest_size = sha1_digest_size;
+ unsigned int md5_secondary_digest_size = digest_size - sha1_primary_digest_size;
+
+ POSIX_ENSURE(EVP_MD_CTX_size(state->digest.high_level.evp.ctx) <= sha1_digest_size, S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_ENSURE(EVP_MD_CTX_size(state->digest.high_level.evp_md5_secondary.ctx) <= md5_secondary_digest_size, S2N_ERR_HASH_DIGEST_FAILED);
+
+ POSIX_GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp.ctx, ((uint8_t *) out) + MD5_DIGEST_LENGTH, &sha1_primary_digest_size), S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp_md5_secondary.ctx, out, &md5_secondary_digest_size), S2N_ERR_HASH_DIGEST_FAILED);
+ return S2N_SUCCESS;
}
- state->currently_in_hash = 0;
- state->is_ready_for_input = 0;
+ POSIX_ENSURE(EVP_MD_CTX_size(state->digest.high_level.evp.ctx) <= digest_size, S2N_ERR_HASH_DIGEST_FAILED);
+ POSIX_GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp.ctx, out, &digest_size), S2N_ERR_HASH_DIGEST_FAILED);
return S2N_SUCCESS;
}
static int s2n_evp_hash_copy(struct s2n_hash_state *to, struct s2n_hash_state *from)
{
- bool is_md5_allowed_for_fips = false;
- switch (from->alg) {
- case S2N_HASH_NONE:
- break;
- case S2N_HASH_MD5:
- GUARD_AS_POSIX(s2n_digest_is_md5_allowed_for_fips(&from->digest.high_level.evp, &is_md5_allowed_for_fips));
- if (is_md5_allowed_for_fips) {
- GUARD(s2n_hash_allow_md5_for_fips(to));
- }
- FALL_THROUGH;
- case S2N_HASH_SHA1:
- case S2N_HASH_SHA224:
- case S2N_HASH_SHA256:
- case S2N_HASH_SHA384:
- case S2N_HASH_SHA512:
- notnull_check(to->digest.high_level.evp.ctx);
- GUARD_OSSL(EVP_MD_CTX_copy_ex(to->digest.high_level.evp.ctx, from->digest.high_level.evp.ctx), S2N_ERR_HASH_COPY_FAILED);
- break;
- case S2N_HASH_MD5_SHA1:
- notnull_check(to->digest.high_level.evp.ctx);
- notnull_check(to->digest.high_level.evp_md5_secondary.ctx);
- GUARD_AS_POSIX(s2n_digest_is_md5_allowed_for_fips(&from->digest.high_level.evp, &is_md5_allowed_for_fips));
- if (is_md5_allowed_for_fips) {
- GUARD(s2n_hash_allow_md5_for_fips(to));
- }
- GUARD_OSSL(EVP_MD_CTX_copy_ex(to->digest.high_level.evp.ctx, from->digest.high_level.evp.ctx), S2N_ERR_HASH_COPY_FAILED);
- GUARD_OSSL(EVP_MD_CTX_copy_ex(to->digest.high_level.evp_md5_secondary.ctx, from->digest.high_level.evp_md5_secondary.ctx), S2N_ERR_HASH_COPY_FAILED);
- break;
- default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
- }
-
to->hash_impl = from->hash_impl;
to->alg = from->alg;
to->is_ready_for_input = from->is_ready_for_input;
to->currently_in_hash = from->currently_in_hash;
+ if (from->alg == S2N_HASH_NONE) {
+ return S2N_SUCCESS;
+ }
+
+ POSIX_ENSURE_REF(to->digest.high_level.evp.ctx);
+ POSIX_GUARD_OSSL(EVP_MD_CTX_copy_ex(to->digest.high_level.evp.ctx, from->digest.high_level.evp.ctx), S2N_ERR_HASH_COPY_FAILED);
+
+ if (from->alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) {
+ POSIX_ENSURE_REF(to->digest.high_level.evp_md5_secondary.ctx);
+ POSIX_GUARD_OSSL(EVP_MD_CTX_copy_ex(to->digest.high_level.evp_md5_secondary.ctx, from->digest.high_level.evp_md5_secondary.ctx), S2N_ERR_HASH_COPY_FAILED);
+ }
+
+ bool is_md5_allowed_for_fips = false;
+ POSIX_GUARD_RESULT(s2n_digest_is_md5_allowed_for_fips(&from->digest.high_level.evp, &is_md5_allowed_for_fips));
+ if (is_md5_allowed_for_fips && (from->alg == S2N_HASH_MD5 || from->alg == S2N_HASH_MD5_SHA1)) {
+ POSIX_GUARD(s2n_hash_allow_md5_for_fips(to));
+ }
return S2N_SUCCESS;
}
@@ -430,19 +428,18 @@ static int s2n_evp_hash_reset(struct s2n_hash_state *state)
{
int reset_md5_for_fips = 0;
bool is_md5_allowed_for_fips = false;
- GUARD_AS_POSIX(s2n_digest_is_md5_allowed_for_fips(&state->digest.high_level.evp, &is_md5_allowed_for_fips));
+ POSIX_GUARD_RESULT(s2n_digest_is_md5_allowed_for_fips(&state->digest.high_level.evp, &is_md5_allowed_for_fips));
if ((state->alg == S2N_HASH_MD5 || state->alg == S2N_HASH_MD5_SHA1) && is_md5_allowed_for_fips) {
reset_md5_for_fips = 1;
}
- GUARD_OSSL(S2N_EVP_MD_CTX_RESET(state->digest.high_level.evp.ctx), S2N_ERR_HASH_WIPE_FAILED);
-
- if (state->alg == S2N_HASH_MD5_SHA1) {
- GUARD_OSSL(S2N_EVP_MD_CTX_RESET(state->digest.high_level.evp_md5_secondary.ctx), S2N_ERR_HASH_WIPE_FAILED);
+ POSIX_GUARD_OSSL(S2N_EVP_MD_CTX_RESET(state->digest.high_level.evp.ctx), S2N_ERR_HASH_WIPE_FAILED);
+ if (state->alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) {
+ POSIX_GUARD_OSSL(S2N_EVP_MD_CTX_RESET(state->digest.high_level.evp_md5_secondary.ctx), S2N_ERR_HASH_WIPE_FAILED);
}
if (reset_md5_for_fips) {
- GUARD(s2n_hash_allow_md5_for_fips(state));
+ POSIX_GUARD(s2n_hash_allow_md5_for_fips(state));
}
/* hash_init resets the ready_for_input and currently_in_hash fields. */
@@ -452,9 +449,13 @@ static int s2n_evp_hash_reset(struct s2n_hash_state *state)
static int s2n_evp_hash_free(struct s2n_hash_state *state)
{
S2N_EVP_MD_CTX_FREE(state->digest.high_level.evp.ctx);
- S2N_EVP_MD_CTX_FREE(state->digest.high_level.evp_md5_secondary.ctx);
state->digest.high_level.evp.ctx = NULL;
- state->digest.high_level.evp_md5_secondary.ctx = NULL;
+
+ if (s2n_use_custom_md5_sha1()) {
+ S2N_EVP_MD_CTX_FREE(state->digest.high_level.evp_md5_secondary.ctx);
+ state->digest.high_level.evp_md5_secondary.ctx = NULL;
+ }
+
state->is_ready_for_input = 0;
return S2N_SUCCESS;
}
@@ -483,104 +484,106 @@ static const struct s2n_hash s2n_evp_hash = {
static int s2n_hash_set_impl(struct s2n_hash_state *state)
{
- state->hash_impl = s2n_is_in_fips_mode() ? &s2n_evp_hash : &s2n_low_level_hash;
-
+ state->hash_impl = &s2n_low_level_hash;
+ if (s2n_use_evp_impl()) {
+ state->hash_impl = &s2n_evp_hash;
+ }
return S2N_SUCCESS;
}
int s2n_hash_new(struct s2n_hash_state *state)
{
- notnull_check(state);
+ POSIX_ENSURE_REF(state);
/* Set hash_impl on initial hash creation.
 * When in FIPS mode, the EVP APIs must be used for hashes.
*/
- GUARD(s2n_hash_set_impl(state));
+ POSIX_GUARD(s2n_hash_set_impl(state));
- notnull_check(state->hash_impl->alloc);
+ POSIX_ENSURE_REF(state->hash_impl->alloc);
- GUARD(state->hash_impl->alloc(state));
+ POSIX_GUARD(state->hash_impl->alloc(state));
return S2N_SUCCESS;
}
S2N_RESULT s2n_hash_state_validate(struct s2n_hash_state *state)
{
- ENSURE_REF(state);
+ RESULT_ENSURE_REF(state);
return S2N_RESULT_OK;
}
int s2n_hash_allow_md5_for_fips(struct s2n_hash_state *state)
{
- notnull_check(state);
+ POSIX_ENSURE_REF(state);
/* Ensure that hash_impl is set, as it may have been reset for s2n_hash_state on s2n_connection_wipe.
 * When in FIPS mode, the EVP APIs must be used for hashes.
*/
- GUARD(s2n_hash_set_impl(state));
+ POSIX_GUARD(s2n_hash_set_impl(state));
- notnull_check(state->hash_impl->allow_md5_for_fips);
+ POSIX_ENSURE_REF(state->hash_impl->allow_md5_for_fips);
return state->hash_impl->allow_md5_for_fips(state);
}
int s2n_hash_init(struct s2n_hash_state *state, s2n_hash_algorithm alg)
{
- notnull_check(state);
+ POSIX_ENSURE_REF(state);
/* Ensure that hash_impl is set, as it may have been reset for s2n_hash_state on s2n_connection_wipe.
 * When in FIPS mode, the EVP APIs must be used for hashes.
*/
- GUARD(s2n_hash_set_impl(state));
+ POSIX_GUARD(s2n_hash_set_impl(state));
bool is_md5_allowed_for_fips = false;
- GUARD_AS_POSIX(s2n_digest_is_md5_allowed_for_fips(&state->digest.high_level.evp, &is_md5_allowed_for_fips));
+ POSIX_GUARD_RESULT(s2n_digest_is_md5_allowed_for_fips(&state->digest.high_level.evp, &is_md5_allowed_for_fips));
if (s2n_hash_is_available(alg) ||
((alg == S2N_HASH_MD5 || alg == S2N_HASH_MD5_SHA1) && is_md5_allowed_for_fips)) {
/* s2n will continue to initialize an "unavailable" hash when s2n is in FIPS mode and
* FIPS is forcing the hash to be made available.
*/
- notnull_check(state->hash_impl->init);
+ POSIX_ENSURE_REF(state->hash_impl->init);
return state->hash_impl->init(state, alg);
} else {
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
}
}
int s2n_hash_update(struct s2n_hash_state *state, const void *data, uint32_t size)
{
- PRECONDITION_POSIX(s2n_hash_state_validate(state));
- ENSURE_POSIX(S2N_MEM_IS_READABLE(data, size), S2N_ERR_PRECONDITION_VIOLATION);
- notnull_check(state->hash_impl->update);
+ POSIX_PRECONDITION(s2n_hash_state_validate(state));
+ POSIX_ENSURE(S2N_MEM_IS_READABLE(data, size), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE_REF(state->hash_impl->update);
return state->hash_impl->update(state, data, size);
}
int s2n_hash_digest(struct s2n_hash_state *state, void *out, uint32_t size)
{
- PRECONDITION_POSIX(s2n_hash_state_validate(state));
- ENSURE_POSIX(S2N_MEM_IS_READABLE(out, size), S2N_ERR_PRECONDITION_VIOLATION);
- notnull_check(state->hash_impl->digest);
+ POSIX_PRECONDITION(s2n_hash_state_validate(state));
+ POSIX_ENSURE(S2N_MEM_IS_READABLE(out, size), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE_REF(state->hash_impl->digest);
return state->hash_impl->digest(state, out, size);
}
int s2n_hash_copy(struct s2n_hash_state *to, struct s2n_hash_state *from)
{
- PRECONDITION_POSIX(s2n_hash_state_validate(to));
- PRECONDITION_POSIX(s2n_hash_state_validate(from));
- notnull_check(from->hash_impl->copy);
+ POSIX_PRECONDITION(s2n_hash_state_validate(to));
+ POSIX_PRECONDITION(s2n_hash_state_validate(from));
+ POSIX_ENSURE_REF(from->hash_impl->copy);
return from->hash_impl->copy(to, from);
}
int s2n_hash_reset(struct s2n_hash_state *state)
{
- notnull_check(state);
+ POSIX_ENSURE_REF(state);
/* Ensure that hash_impl is set, as it may have been reset for s2n_hash_state on s2n_connection_wipe.
 * When in FIPS mode, the EVP APIs must be used for hashes.
*/
- GUARD(s2n_hash_set_impl(state));
+ POSIX_GUARD(s2n_hash_set_impl(state));
- notnull_check(state->hash_impl->reset);
+ POSIX_ENSURE_REF(state->hash_impl->reset);
return state->hash_impl->reset(state);
}
@@ -594,32 +597,31 @@ int s2n_hash_free(struct s2n_hash_state *state)
/* Ensure that hash_impl is set, as it may have been reset for s2n_hash_state on s2n_connection_wipe.
 * When in FIPS mode, the EVP APIs must be used for hashes.
*/
- GUARD(s2n_hash_set_impl(state));
+ POSIX_GUARD(s2n_hash_set_impl(state));
- notnull_check(state->hash_impl->free);
+ POSIX_ENSURE_REF(state->hash_impl->free);
return state->hash_impl->free(state);
}
int s2n_hash_get_currently_in_hash_total(struct s2n_hash_state *state, uint64_t *out)
{
- PRECONDITION_POSIX(s2n_hash_state_validate(state));
- ENSURE_POSIX(S2N_MEM_IS_WRITABLE(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
- ENSURE_POSIX(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
+ POSIX_PRECONDITION(s2n_hash_state_validate(state));
+ POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
*out = state->currently_in_hash;
return S2N_SUCCESS;
}
-
/* Calculate, in constant time, the number of bytes currently in the hash_block */
int s2n_hash_const_time_get_currently_in_hash_block(struct s2n_hash_state *state, uint64_t *out)
{
- PRECONDITION_POSIX(s2n_hash_state_validate(state));
- ENSURE_POSIX(S2N_MEM_IS_WRITABLE(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
- ENSURE_POSIX(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
+ POSIX_PRECONDITION(s2n_hash_state_validate(state));
+ POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY);
uint64_t hash_block_size;
- GUARD(s2n_hash_block_size(state->alg, &hash_block_size));
+ POSIX_GUARD(s2n_hash_block_size(state->alg, &hash_block_size));
 /* Requires that hash_block_size is a power of 2. This is true for all hashes we currently support.
 * If this ever becomes untrue, this would require fixing. */
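The power-of-two requirement in the comment above comes down to the identity x % b == x & (b - 1) for power-of-two b, which lets the block position be computed with a mask in a data-independent number of cycles. A quick self-check (not part of the diff; the helper name is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t bytes_in_block(uint64_t currently_in_hash, uint64_t block_size)
    {
        /* valid only when block_size is a power of two (64 or 128 for these hashes) */
        return currently_in_hash & (block_size - 1);
    }

    int main(void)
    {
        assert(bytes_in_block(1000, 64) == 1000 % 64);
        assert(bytes_in_block(1000, 128) == 1000 % 128);
        return 0;
    }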
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_hash.h b/contrib/restricted/aws/s2n/crypto/s2n_hash.h
index 2ca40e77ca..6ae9b13386 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_hash.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_hash.h
@@ -87,6 +87,8 @@ struct s2n_hash {
int (*free) (struct s2n_hash_state *state);
};
+bool s2n_hash_evp_fully_supported();
+const EVP_MD* s2n_hash_alg_to_evp_md(s2n_hash_algorithm alg);
extern int s2n_hash_digest_size(s2n_hash_algorithm alg, uint8_t *out);
extern int s2n_hash_block_size(s2n_hash_algorithm alg, uint64_t *block_size);
extern bool s2n_hash_is_available(s2n_hash_algorithm alg);
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_hkdf.c b/contrib/restricted/aws/s2n/crypto/s2n_hkdf.c
index cefd528fdf..eda68149d8 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_hkdf.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_hkdf.c
@@ -34,13 +34,13 @@ int s2n_hkdf_extract(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const
const struct s2n_blob *key, struct s2n_blob *pseudo_rand_key)
{
uint8_t hmac_size;
- GUARD(s2n_hmac_digest_size(alg, &hmac_size));
+ POSIX_GUARD(s2n_hmac_digest_size(alg, &hmac_size));
pseudo_rand_key->size = hmac_size;
- GUARD(s2n_hmac_init(hmac, alg, salt->data, salt->size));
- GUARD(s2n_hmac_update(hmac, key->data, key->size));
- GUARD(s2n_hmac_digest(hmac, pseudo_rand_key->data, pseudo_rand_key->size));
+ POSIX_GUARD(s2n_hmac_init(hmac, alg, salt->data, salt->size));
+ POSIX_GUARD(s2n_hmac_update(hmac, key->data, key->size));
+ POSIX_GUARD(s2n_hmac_digest(hmac, pseudo_rand_key->data, pseudo_rand_key->size));
- GUARD(s2n_hmac_reset(hmac));
+ POSIX_GUARD(s2n_hmac_reset(hmac));
return 0;
}
@@ -51,8 +51,9 @@ static int s2n_hkdf_expand(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg,
uint8_t prev[MAX_DIGEST_SIZE] = { 0 };
uint32_t done_len = 0;
- uint8_t hash_len;
- GUARD(s2n_hmac_digest_size(alg, &hash_len));
+ uint8_t hash_len = 0;
+ POSIX_GUARD(s2n_hmac_digest_size(alg, &hash_len));
+ POSIX_ENSURE_GT(hash_len, 0);
uint32_t total_rounds = output->size / hash_len;
if (output->size % hash_len) {
total_rounds++;
@@ -62,24 +63,24 @@ static int s2n_hkdf_expand(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg,
for (uint32_t curr_round = 1; curr_round <= total_rounds; curr_round++) {
uint32_t cat_len;
- GUARD(s2n_hmac_init(hmac, alg, pseudo_rand_key->data, pseudo_rand_key->size));
+ POSIX_GUARD(s2n_hmac_init(hmac, alg, pseudo_rand_key->data, pseudo_rand_key->size));
if (curr_round != 1) {
- GUARD(s2n_hmac_update(hmac, prev, hash_len));
+ POSIX_GUARD(s2n_hmac_update(hmac, prev, hash_len));
}
- GUARD(s2n_hmac_update(hmac, info->data, info->size));
- GUARD(s2n_hmac_update(hmac, &curr_round, 1));
- GUARD(s2n_hmac_digest(hmac, prev, hash_len));
+ POSIX_GUARD(s2n_hmac_update(hmac, info->data, info->size));
+ POSIX_GUARD(s2n_hmac_update(hmac, &curr_round, 1));
+ POSIX_GUARD(s2n_hmac_digest(hmac, prev, hash_len));
cat_len = hash_len;
if (done_len + hash_len > output->size) {
cat_len = output->size - done_len;
}
- memcpy_check(output->data + done_len, prev, cat_len);
+ POSIX_CHECKED_MEMCPY(output->data + done_len, prev, cat_len);
done_len += cat_len;
- GUARD(s2n_hmac_reset(hmac));
+ POSIX_GUARD(s2n_hmac_reset(hmac));
}
return 0;
@@ -96,19 +97,19 @@ int s2n_hkdf_expand_label(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, c
/* RFC8446 specifies that labels must be 12 characters or less, to avoid
** incurring two hash rounds.
*/
- lte_check(label->size, 12);
+ POSIX_ENSURE_LTE(label->size, 12);
- GUARD(s2n_blob_init(&hkdf_label_blob, hkdf_label_buf, sizeof(hkdf_label_buf)));
- GUARD(s2n_stuffer_init(&hkdf_label, &hkdf_label_blob));
- GUARD(s2n_stuffer_write_uint16(&hkdf_label, output->size));
- GUARD(s2n_stuffer_write_uint8(&hkdf_label, label->size + sizeof("tls13 ") - 1));
- GUARD(s2n_stuffer_write_str(&hkdf_label, "tls13 "));
- GUARD(s2n_stuffer_write(&hkdf_label, label));
- GUARD(s2n_stuffer_write_uint8(&hkdf_label, context->size));
- GUARD(s2n_stuffer_write(&hkdf_label, context));
+ POSIX_GUARD(s2n_blob_init(&hkdf_label_blob, hkdf_label_buf, sizeof(hkdf_label_buf)));
+ POSIX_GUARD(s2n_stuffer_init(&hkdf_label, &hkdf_label_blob));
+ POSIX_GUARD(s2n_stuffer_write_uint16(&hkdf_label, output->size));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&hkdf_label, label->size + sizeof("tls13 ") - 1));
+ POSIX_GUARD(s2n_stuffer_write_str(&hkdf_label, "tls13 "));
+ POSIX_GUARD(s2n_stuffer_write(&hkdf_label, label));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&hkdf_label, context->size));
+ POSIX_GUARD(s2n_stuffer_write(&hkdf_label, context));
hkdf_label_blob.size = s2n_stuffer_data_available(&hkdf_label);
- GUARD(s2n_hkdf_expand(hmac, alg, secret, &hkdf_label_blob, output));
+ POSIX_GUARD(s2n_hkdf_expand(hmac, alg, secret, &hkdf_label_blob, output));
return 0;
}
@@ -119,8 +120,8 @@ int s2n_hkdf(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s
uint8_t prk_pad[MAX_DIGEST_SIZE];
struct s2n_blob pseudo_rand_key = {.data = prk_pad,.size = sizeof(prk_pad) };
- GUARD(s2n_hkdf_extract(hmac, alg, salt, key, &pseudo_rand_key));
- GUARD(s2n_hkdf_expand(hmac, alg, &pseudo_rand_key, info, output));
+ POSIX_GUARD(s2n_hkdf_extract(hmac, alg, salt, key, &pseudo_rand_key));
+ POSIX_GUARD(s2n_hkdf_expand(hmac, alg, &pseudo_rand_key, info, output));
return 0;
}
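The round arithmetic in s2n_hkdf_expand above can be checked with a small worked example (not part of the diff): with SHA-256 (32-byte digests) and a 48-byte output, two rounds are needed and the second round copies only the remaining 16 bytes:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t hash_len = 32;      /* SHA-256 digest size */
        uint32_t output_size = 48;  /* requested output length */
        uint32_t total_rounds = output_size / hash_len;
        if (output_size % hash_len) {
            total_rounds++;
        }
        uint32_t last_round_bytes = output_size - (total_rounds - 1) * hash_len;
        printf("rounds=%" PRIu32 ", last round copies %" PRIu32 " bytes\n",
               total_rounds, last_round_bytes);
        return 0;
    }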
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_hmac.c b/contrib/restricted/aws/s2n/crypto/s2n_hmac.c
index 2689545abb..29ded952ce 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_hmac.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_hmac.c
@@ -30,7 +30,7 @@
int s2n_hash_hmac_alg(s2n_hash_algorithm hash_alg, s2n_hmac_algorithm *out)
{
- ENSURE_POSIX(S2N_MEM_IS_WRITABLE(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
switch(hash_alg) {
case S2N_HASH_NONE: *out = S2N_HMAC_NONE; break;
case S2N_HASH_MD5: *out = S2N_HMAC_MD5; break;
@@ -41,14 +41,14 @@ int s2n_hash_hmac_alg(s2n_hash_algorithm hash_alg, s2n_hmac_algorithm *out)
case S2N_HASH_SHA512: *out = S2N_HMAC_SHA512; break;
case S2N_HASH_MD5_SHA1: /* Fall through ... */
default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
}
return S2N_SUCCESS;
}
int s2n_hmac_hash_alg(s2n_hmac_algorithm hmac_alg, s2n_hash_algorithm *out)
{
- ENSURE_POSIX(S2N_MEM_IS_WRITABLE(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION);
switch(hmac_alg) {
case S2N_HMAC_NONE: *out = S2N_HASH_NONE; break;
case S2N_HMAC_MD5: *out = S2N_HASH_MD5; break;
@@ -60,7 +60,7 @@ int s2n_hmac_hash_alg(s2n_hmac_algorithm hmac_alg, s2n_hash_algorithm *out)
case S2N_HMAC_SSLv3_MD5: *out = S2N_HASH_MD5; break;
case S2N_HMAC_SSLv3_SHA1: *out = S2N_HASH_SHA1; break;
default:
- S2N_ERROR(S2N_ERR_HMAC_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM);
}
return S2N_SUCCESS;
}
@@ -68,8 +68,8 @@ int s2n_hmac_hash_alg(s2n_hmac_algorithm hmac_alg, s2n_hash_algorithm *out)
int s2n_hmac_digest_size(s2n_hmac_algorithm hmac_alg, uint8_t *out)
{
s2n_hash_algorithm hash_alg;
- GUARD(s2n_hmac_hash_alg(hmac_alg, &hash_alg));
- GUARD(s2n_hash_digest_size(hash_alg, out));
+ POSIX_GUARD(s2n_hmac_hash_alg(hmac_alg, &hash_alg));
+ POSIX_GUARD(s2n_hash_digest_size(hash_alg, out));
return S2N_SUCCESS;
}
@@ -99,15 +99,15 @@ static int s2n_sslv3_mac_init(struct s2n_hmac_state *state, s2n_hmac_algorithm a
state->xor_pad[i] = 0x36;
}
- GUARD(s2n_hash_update(&state->inner_just_key, key, klen));
- GUARD(s2n_hash_update(&state->inner_just_key, state->xor_pad, state->xor_pad_size));
+ POSIX_GUARD(s2n_hash_update(&state->inner_just_key, key, klen));
+ POSIX_GUARD(s2n_hash_update(&state->inner_just_key, state->xor_pad, state->xor_pad_size));
for (int i = 0; i < state->xor_pad_size; i++) {
state->xor_pad[i] = 0x5c;
}
- GUARD(s2n_hash_update(&state->outer_just_key, key, klen));
- GUARD(s2n_hash_update(&state->outer_just_key, state->xor_pad, state->xor_pad_size));
+ POSIX_GUARD(s2n_hash_update(&state->outer_just_key, key, klen));
+ POSIX_GUARD(s2n_hash_update(&state->outer_just_key, state->xor_pad, state->xor_pad_size));
return S2N_SUCCESS;
}
@@ -117,31 +117,31 @@ static int s2n_tls_hmac_init(struct s2n_hmac_state *state, s2n_hmac_algorithm al
memset(&state->xor_pad, 0, sizeof(state->xor_pad));
if (klen > state->xor_pad_size) {
- GUARD(s2n_hash_update(&state->outer, key, klen));
- GUARD(s2n_hash_digest(&state->outer, state->digest_pad, state->digest_size));
- memcpy_check(state->xor_pad, state->digest_pad, state->digest_size);
+ POSIX_GUARD(s2n_hash_update(&state->outer, key, klen));
+ POSIX_GUARD(s2n_hash_digest(&state->outer, state->digest_pad, state->digest_size));
+ POSIX_CHECKED_MEMCPY(state->xor_pad, state->digest_pad, state->digest_size);
} else {
- memcpy_check(state->xor_pad, key, klen);
+ POSIX_CHECKED_MEMCPY(state->xor_pad, key, klen);
}
for (int i = 0; i < state->xor_pad_size; i++) {
state->xor_pad[i] ^= 0x36;
}
- GUARD(s2n_hash_update(&state->inner_just_key, state->xor_pad, state->xor_pad_size));
+ POSIX_GUARD(s2n_hash_update(&state->inner_just_key, state->xor_pad, state->xor_pad_size));
/* 0x36 xor 0x5c == 0x6a */
for (int i = 0; i < state->xor_pad_size; i++) {
state->xor_pad[i] ^= 0x6a;
}
- GUARD(s2n_hash_update(&state->outer_just_key, state->xor_pad, state->xor_pad_size));
+ POSIX_GUARD(s2n_hash_update(&state->outer_just_key, state->xor_pad, state->xor_pad_size));
return S2N_SUCCESS;
}
int s2n_hmac_xor_pad_size(s2n_hmac_algorithm hmac_alg, uint16_t *xor_pad_size)
{
- ENSURE_POSIX(S2N_MEM_IS_WRITABLE(xor_pad_size, sizeof(*xor_pad_size)), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(xor_pad_size, sizeof(*xor_pad_size)), S2N_ERR_PRECONDITION_VIOLATION);
switch(hmac_alg) {
case S2N_HMAC_NONE: *xor_pad_size = 64; break;
case S2N_HMAC_MD5: *xor_pad_size = 64; break;
@@ -153,14 +153,14 @@ int s2n_hmac_xor_pad_size(s2n_hmac_algorithm hmac_alg, uint16_t *xor_pad_size)
case S2N_HMAC_SSLv3_MD5: *xor_pad_size = 48; break;
case S2N_HMAC_SSLv3_SHA1: *xor_pad_size = 40; break;
default:
- S2N_ERROR(S2N_ERR_HMAC_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM);
}
return S2N_SUCCESS;
}
int s2n_hmac_hash_block_size(s2n_hmac_algorithm hmac_alg, uint16_t *block_size)
{
- ENSURE_POSIX(S2N_MEM_IS_WRITABLE(block_size, sizeof(*block_size)), S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(block_size, sizeof(*block_size)), S2N_ERR_PRECONDITION_VIOLATION);
switch(hmac_alg) {
case S2N_HMAC_NONE: *block_size = 64; break;
case S2N_HMAC_MD5: *block_size = 64; break;
@@ -172,63 +172,63 @@ int s2n_hmac_hash_block_size(s2n_hmac_algorithm hmac_alg, uint16_t *block_size)
case S2N_HMAC_SSLv3_MD5: *block_size = 64; break;
case S2N_HMAC_SSLv3_SHA1: *block_size = 64; break;
default:
- S2N_ERROR(S2N_ERR_HMAC_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM);
}
return S2N_SUCCESS;
}
int s2n_hmac_new(struct s2n_hmac_state *state)
{
- ENSURE_POSIX_REF(state);
- GUARD(s2n_hash_new(&state->inner));
- GUARD(s2n_hash_new(&state->inner_just_key));
- GUARD(s2n_hash_new(&state->outer));
- GUARD(s2n_hash_new(&state->outer_just_key));
- POSTCONDITION_POSIX(s2n_hmac_state_validate(state));
+ POSIX_ENSURE_REF(state);
+ POSIX_GUARD(s2n_hash_new(&state->inner));
+ POSIX_GUARD(s2n_hash_new(&state->inner_just_key));
+ POSIX_GUARD(s2n_hash_new(&state->outer));
+ POSIX_GUARD(s2n_hash_new(&state->outer_just_key));
+ POSIX_POSTCONDITION(s2n_hmac_state_validate(state));
return S2N_SUCCESS;
}
S2N_RESULT s2n_hmac_state_validate(struct s2n_hmac_state *state)
{
- ENSURE_REF(state);
- GUARD_RESULT(s2n_hash_state_validate(&state->inner));
- GUARD_RESULT(s2n_hash_state_validate(&state->inner_just_key));
- GUARD_RESULT(s2n_hash_state_validate(&state->outer));
- GUARD_RESULT(s2n_hash_state_validate(&state->outer_just_key));
+ RESULT_ENSURE_REF(state);
+ RESULT_GUARD(s2n_hash_state_validate(&state->inner));
+ RESULT_GUARD(s2n_hash_state_validate(&state->inner_just_key));
+ RESULT_GUARD(s2n_hash_state_validate(&state->outer));
+ RESULT_GUARD(s2n_hash_state_validate(&state->outer_just_key));
return S2N_RESULT_OK;
}
int s2n_hmac_init(struct s2n_hmac_state *state, s2n_hmac_algorithm alg, const void *key, uint32_t klen)
{
- notnull_check(state);
+ POSIX_ENSURE_REF(state);
if (!s2n_hmac_is_available(alg)) {
/* Prevent hmacs from being used if they are not available. */
- S2N_ERROR(S2N_ERR_HMAC_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM);
}
state->alg = alg;
- GUARD(s2n_hmac_hash_block_size(alg, &state->hash_block_size));
+ POSIX_GUARD(s2n_hmac_hash_block_size(alg, &state->hash_block_size));
state->currently_in_hash_block = 0;
- GUARD(s2n_hmac_xor_pad_size(alg, &state->xor_pad_size));
- GUARD(s2n_hmac_digest_size(alg, &state->digest_size));
+ POSIX_GUARD(s2n_hmac_xor_pad_size(alg, &state->xor_pad_size));
+ POSIX_GUARD(s2n_hmac_digest_size(alg, &state->digest_size));
- gte_check(sizeof(state->xor_pad), state->xor_pad_size);
- gte_check(sizeof(state->digest_pad), state->digest_size);
+ POSIX_ENSURE_GTE(sizeof(state->xor_pad), state->xor_pad_size);
+ POSIX_ENSURE_GTE(sizeof(state->digest_pad), state->digest_size);
/* key needs to be as large as the biggest block size */
- gte_check(sizeof(state->xor_pad), state->hash_block_size);
+ POSIX_ENSURE_GTE(sizeof(state->xor_pad), state->hash_block_size);
s2n_hash_algorithm hash_alg;
- GUARD(s2n_hmac_hash_alg(alg, &hash_alg));
+ POSIX_GUARD(s2n_hmac_hash_alg(alg, &hash_alg));
- GUARD(s2n_hash_init(&state->inner, hash_alg));
- GUARD(s2n_hash_init(&state->inner_just_key, hash_alg));
- GUARD(s2n_hash_init(&state->outer, hash_alg));
- GUARD(s2n_hash_init(&state->outer_just_key, hash_alg));
+ POSIX_GUARD(s2n_hash_init(&state->inner, hash_alg));
+ POSIX_GUARD(s2n_hash_init(&state->inner_just_key, hash_alg));
+ POSIX_GUARD(s2n_hash_init(&state->outer, hash_alg));
+ POSIX_GUARD(s2n_hash_init(&state->outer_just_key, hash_alg));
if (alg == S2N_HMAC_SSLv3_SHA1 || alg == S2N_HMAC_SSLv3_MD5) {
- GUARD(s2n_sslv3_mac_init(state, alg, key, klen));
+ POSIX_GUARD(s2n_sslv3_mac_init(state, alg, key, klen));
} else {
- GUARD(s2n_tls_hmac_init(state, alg, key, klen));
+ POSIX_GUARD(s2n_tls_hmac_init(state, alg, key, klen));
}
/* Once we have produced inner_just_key and outer_just_key, don't need the key material in xor_pad, so wipe it.
@@ -236,15 +236,15 @@ int s2n_hmac_init(struct s2n_hmac_state *state, s2n_hmac_algorithm alg, const vo
 * this also prevents uninitialized bytes from being used.
*/
memset(&state->xor_pad, 0, sizeof(state->xor_pad));
- GUARD(s2n_hmac_reset(state));
+ POSIX_GUARD(s2n_hmac_reset(state));
return S2N_SUCCESS;
}
int s2n_hmac_update(struct s2n_hmac_state *state, const void *in, uint32_t size)
{
- PRECONDITION_POSIX(s2n_hmac_state_validate(state));
- ENSURE_POSIX(state->hash_block_size != 0, S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_PRECONDITION(s2n_hmac_state_validate(state));
+ POSIX_ENSURE(state->hash_block_size != 0, S2N_ERR_PRECONDITION_VIOLATION);
/* Keep track of how much of the current hash block is full
*
* Why the 4294949760 constant in this code? 4294949760 is the highest 32-bit
@@ -267,9 +267,9 @@ int s2n_hmac_update(struct s2n_hmac_state *state, const void *in, uint32_t size)
* smaller number of cycles if the input is "small".
*/
const uint32_t HIGHEST_32_BIT = 4294949760;
- ENSURE_POSIX(size <= (UINT32_MAX - HIGHEST_32_BIT), S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(size <= (UINT32_MAX - HIGHEST_32_BIT), S2N_ERR_INTEGER_OVERFLOW);
uint32_t value = (HIGHEST_32_BIT + size) % state->hash_block_size;
- GUARD(s2n_add_overflow(state->currently_in_hash_block, value, &state->currently_in_hash_block));
+ POSIX_GUARD(s2n_add_overflow(state->currently_in_hash_block, value, &state->currently_in_hash_block));
state->currently_in_hash_block %= state->hash_block_size;
return s2n_hash_update(&state->inner, in, size);
@@ -277,10 +277,10 @@ int s2n_hmac_update(struct s2n_hmac_state *state, const void *in, uint32_t size)
int s2n_hmac_digest(struct s2n_hmac_state *state, void *out, uint32_t size)
{
- PRECONDITION_POSIX(s2n_hmac_state_validate(state));
- GUARD(s2n_hash_digest(&state->inner, state->digest_pad, state->digest_size));
- GUARD(s2n_hash_copy(&state->outer, &state->outer_just_key));
- GUARD(s2n_hash_update(&state->outer, state->digest_pad, state->digest_size));
+ POSIX_PRECONDITION(s2n_hmac_state_validate(state));
+ POSIX_GUARD(s2n_hash_digest(&state->inner, state->digest_pad, state->digest_size));
+ POSIX_GUARD(s2n_hash_copy(&state->outer, &state->outer_just_key));
+ POSIX_GUARD(s2n_hash_update(&state->outer, state->digest_pad, state->digest_size));
return s2n_hash_digest(&state->outer, out, size);
}
@@ -288,7 +288,7 @@ int s2n_hmac_digest(struct s2n_hmac_state *state, void *out, uint32_t size)
int s2n_hmac_digest_two_compression_rounds(struct s2n_hmac_state *state, void *out, uint32_t size)
{
/* Do the "real" work of this function. */
- GUARD(s2n_hmac_digest(state, out, size));
+ POSIX_GUARD(s2n_hmac_digest(state, out, size));
/* If there were 9 or more bytes of space left in the current hash block
* then the serialized length, plus an 0x80 byte, will have fit in that block.
@@ -304,7 +304,7 @@ int s2n_hmac_digest_two_compression_rounds(struct s2n_hmac_state *state, void *o
}
/* Can't reuse a hash after it has been finalized, so reset and push another block in */
- GUARD(s2n_hash_reset(&state->inner));
+ POSIX_GUARD(s2n_hash_reset(&state->inner));
/* No-op s2n_hash_update to normalize timing and guard against Lucky13. This does not affect the value of *out. */
return s2n_hash_update(&state->inner, state->xor_pad, state->hash_block_size);
@@ -313,10 +313,10 @@ int s2n_hmac_digest_two_compression_rounds(struct s2n_hmac_state *state, void *o
int s2n_hmac_free(struct s2n_hmac_state *state)
{
if (state) {
- GUARD(s2n_hash_free(&state->inner));
- GUARD(s2n_hash_free(&state->inner_just_key));
- GUARD(s2n_hash_free(&state->outer));
- GUARD(s2n_hash_free(&state->outer_just_key));
+ POSIX_GUARD(s2n_hash_free(&state->inner));
+ POSIX_GUARD(s2n_hash_free(&state->inner_just_key));
+ POSIX_GUARD(s2n_hash_free(&state->outer));
+ POSIX_GUARD(s2n_hash_free(&state->outer_just_key));
}
return S2N_SUCCESS;
@@ -324,14 +324,14 @@ int s2n_hmac_free(struct s2n_hmac_state *state)
int s2n_hmac_reset(struct s2n_hmac_state *state)
{
- PRECONDITION_POSIX(s2n_hmac_state_validate(state));
- ENSURE_POSIX(state->hash_block_size != 0, S2N_ERR_PRECONDITION_VIOLATION);
- GUARD(s2n_hash_copy(&state->inner, &state->inner_just_key));
+ POSIX_PRECONDITION(s2n_hmac_state_validate(state));
+ POSIX_ENSURE(state->hash_block_size != 0, S2N_ERR_PRECONDITION_VIOLATION);
+ POSIX_GUARD(s2n_hash_copy(&state->inner, &state->inner_just_key));
uint64_t bytes_in_hash;
- GUARD(s2n_hash_get_currently_in_hash_total(&state->inner, &bytes_in_hash));
+ POSIX_GUARD(s2n_hash_get_currently_in_hash_total(&state->inner, &bytes_in_hash));
bytes_in_hash %= state->hash_block_size;
- ENSURE_POSIX(bytes_in_hash <= UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(bytes_in_hash <= UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
/* The length of the key is not private, so don't need to do tricky math here */
state->currently_in_hash_block = bytes_in_hash;
return S2N_SUCCESS;
@@ -344,8 +344,8 @@ int s2n_hmac_digest_verify(const void *a, const void *b, uint32_t len)
int s2n_hmac_copy(struct s2n_hmac_state *to, struct s2n_hmac_state *from)
{
- PRECONDITION_POSIX(s2n_hmac_state_validate(to));
- PRECONDITION_POSIX(s2n_hmac_state_validate(from));
+ POSIX_PRECONDITION(s2n_hmac_state_validate(to));
+ POSIX_PRECONDITION(s2n_hmac_state_validate(from));
/* memcpy cannot be used on s2n_hmac_state as the underlying s2n_hash implementation's
* copy must be used. This is enforced when the s2n_hash implementation is s2n_evp_hash.
*/
@@ -355,16 +355,16 @@ int s2n_hmac_copy(struct s2n_hmac_state *to, struct s2n_hmac_state *from)
to->xor_pad_size = from->xor_pad_size;
to->digest_size = from->digest_size;
- GUARD(s2n_hash_copy(&to->inner, &from->inner));
- GUARD(s2n_hash_copy(&to->inner_just_key, &from->inner_just_key));
- GUARD(s2n_hash_copy(&to->outer, &from->outer));
- GUARD(s2n_hash_copy(&to->outer_just_key, &from->outer_just_key));
+ POSIX_GUARD(s2n_hash_copy(&to->inner, &from->inner));
+ POSIX_GUARD(s2n_hash_copy(&to->inner_just_key, &from->inner_just_key));
+ POSIX_GUARD(s2n_hash_copy(&to->outer, &from->outer));
+ POSIX_GUARD(s2n_hash_copy(&to->outer_just_key, &from->outer_just_key));
- memcpy_check(to->xor_pad, from->xor_pad, sizeof(to->xor_pad));
- memcpy_check(to->digest_pad, from->digest_pad, sizeof(to->digest_pad));
- POSTCONDITION_POSIX(s2n_hmac_state_validate(to));
- POSTCONDITION_POSIX(s2n_hmac_state_validate(from));
+ POSIX_CHECKED_MEMCPY(to->xor_pad, from->xor_pad, sizeof(to->xor_pad));
+ POSIX_CHECKED_MEMCPY(to->digest_pad, from->digest_pad, sizeof(to->digest_pad));
+ POSIX_POSTCONDITION(s2n_hmac_state_validate(to));
+ POSIX_POSTCONDITION(s2n_hmac_state_validate(from));
return S2N_SUCCESS;
}
@@ -374,8 +374,8 @@ int s2n_hmac_copy(struct s2n_hmac_state *to, struct s2n_hmac_state *from)
*/
int s2n_hmac_save_evp_hash_state(struct s2n_hmac_evp_backup* backup, struct s2n_hmac_state* hmac)
{
- ENSURE_POSIX_REF(backup);
- PRECONDITION_POSIX(s2n_hmac_state_validate(hmac));
+ POSIX_ENSURE_REF(backup);
+ POSIX_PRECONDITION(s2n_hmac_state_validate(hmac));
backup->inner = hmac->inner.digest.high_level;
backup->inner_just_key = hmac->inner_just_key.digest.high_level;
backup->outer = hmac->outer.digest.high_level;
@@ -385,12 +385,12 @@ int s2n_hmac_save_evp_hash_state(struct s2n_hmac_evp_backup* backup, struct s2n_
int s2n_hmac_restore_evp_hash_state(struct s2n_hmac_evp_backup* backup, struct s2n_hmac_state* hmac)
{
- ENSURE_POSIX_REF(backup);
- PRECONDITION_POSIX(s2n_hmac_state_validate(hmac));
+ POSIX_ENSURE_REF(backup);
+ POSIX_PRECONDITION(s2n_hmac_state_validate(hmac));
hmac->inner.digest.high_level = backup->inner;
hmac->inner_just_key.digest.high_level = backup->inner_just_key;
hmac->outer.digest.high_level = backup->outer;
hmac->outer_just_key.digest.high_level = backup->outer_just_key;
- POSTCONDITION_POSIX(s2n_hmac_state_validate(hmac));
+ POSIX_POSTCONDITION(s2n_hmac_state_validate(hmac));
return S2N_SUCCESS;
}
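The save/restore pair above exists so that an EVP-backed hash can be snapshotted before an operation that would otherwise finalize it, then put back afterwards. A minimal sketch of that pairing, assuming an HMAC state hmac that was created and keyed elsewhere (the setup and the SHA-256 digest size are illustrative assumptions, not part of this change):

    /* Illustrative only: snapshot the EVP digest state, take an intermediate
     * digest, then restore so the HMAC keeps accepting updates. */
    struct s2n_hmac_evp_backup backup = { 0 };
    uint8_t digest[SHA256_DIGEST_LENGTH] = { 0 };

    POSIX_GUARD(s2n_hmac_save_evp_hash_state(&backup, &hmac));
    POSIX_GUARD(s2n_hmac_digest(&hmac, digest, sizeof(digest)));
    POSIX_GUARD(s2n_hmac_restore_evp_hash_state(&backup, &hmac));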
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_hmac.h b/contrib/restricted/aws/s2n/crypto/s2n_hmac.h
index 34f8314b2d..1a3d52a343 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_hmac.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_hmac.h
@@ -44,7 +44,6 @@ struct s2n_hmac_state {
struct s2n_hash_state outer;
struct s2n_hash_state outer_just_key;
-
/* key needs to be as large as the biggest block size */
uint8_t xor_pad[128];
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_libcrypto.c b/contrib/restricted/aws/s2n/crypto/s2n_libcrypto.c
new file mode 100644
index 0000000000..2fa5b003de
--- /dev/null
+++ b/contrib/restricted/aws/s2n/crypto/s2n_libcrypto.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "crypto/s2n_crypto.h"
+#include "crypto/s2n_openssl.h"
+
+/*
+ * Verifying libcrypto type via method should be preferred
+ * where possible, since it reduces #ifs and avoids potential
+ * bugs where the header containing the #define is not included.
+ */
+
+bool s2n_libcrypto_is_awslc()
+{
+#if defined(OPENSSL_IS_AWSLC)
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool s2n_libcrypto_is_boringssl()
+{
+#if defined(OPENSSL_IS_BORINGSSL)
+ return true;
+#else
+ return false;
+#endif
+}
+
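The header comment above argues for runtime predicates over preprocessor checks. A hedged sketch of a caller using them in place of an #if (the wrapper function name is hypothetical, and it covers only the awslc/boringssl part of S2N_LIBCRYPTO_SUPPORTS_CUSTOM_RAND):

    #include "crypto/s2n_openssl.h"

    /* Hypothetical helper: branch on the libcrypto flavor at runtime instead
     * of requiring the relevant #define to be visible at the call site. */
    static bool s2n_example_custom_rand_allowed(void)
    {
        /* awslc and boringssl are excluded from custom RAND support above */
        if (s2n_libcrypto_is_awslc() || s2n_libcrypto_is_boringssl()) {
            return false;
        }
        return true;
    }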
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_openssl.h b/contrib/restricted/aws/s2n/crypto/s2n_openssl.h
index 92c53c26fd..a6cebf982f 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_openssl.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_openssl.h
@@ -15,6 +15,8 @@
#pragma once
+#include <stdbool.h>
+
/**
* openssl with OPENSSL_VERSION_NUMBER < 0x10100003L made data type details unavailable
* libressl use openssl with data type details available, but mandatorily set
@@ -37,13 +39,18 @@
(OPENSSL_VERSION_NUMBER >= ((major << 28) + (minor << 20) + (fix << 12)))
#if (S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0)) && (!defined(OPENSSL_IS_BORINGSSL)) && (!defined(OPENSSL_IS_AWSLC))
-#define s2n_evp_ctx_init(ctx) GUARD_OSSL(EVP_CIPHER_CTX_init(ctx), S2N_ERR_DRBG)
+#define s2n_evp_ctx_init(ctx) POSIX_GUARD_OSSL(EVP_CIPHER_CTX_init(ctx), S2N_ERR_DRBG)
+#define RESULT_EVP_CTX_INIT(ctx) RESULT_GUARD_OSSL(EVP_CIPHER_CTX_init(ctx), S2N_ERR_DRBG)
#else
#define s2n_evp_ctx_init(ctx) EVP_CIPHER_CTX_init(ctx)
+#define RESULT_EVP_CTX_INIT(ctx) EVP_CIPHER_CTX_init(ctx)
#endif
-#if !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_FIPS) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_AWSLC)
+#if !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_FIPS) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_NO_ENGINE)
#define S2N_LIBCRYPTO_SUPPORTS_CUSTOM_RAND 1
#else
#define S2N_LIBCRYPTO_SUPPORTS_CUSTOM_RAND 0
#endif
+
+bool s2n_libcrypto_is_awslc();
+bool s2n_libcrypto_is_boringssl();
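For reference, the version macro is a plain integer comparison against the nibble layout OpenSSL uses for OPENSSL_VERSION_NUMBER; a worked expansion (the concrete release numbers are only illustrative):

    /* S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 1) compares against
     *   (1 << 28) + (1 << 20) + (1 << 12)
     *   = 0x10000000 + 0x00100000 + 0x00001000
     *   = 0x10101000
     * OpenSSL 1.1.1 defines OPENSSL_VERSION_NUMBER as 0x1010100fL or higher,
     * so the check passes there; 1.0.2 (0x100020xxL) fails it. */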
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.c b/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.c
new file mode 100644
index 0000000000..a61805cee7
--- /dev/null
+++ b/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "api/s2n.h"
+#include "crypto/s2n_openssl_x509.h"
+
+int s2n_openssl_x509_stack_pop_free(STACK_OF(X509) **cert_chain)
+{
+ if (*cert_chain != NULL) {
+ sk_X509_pop_free(*cert_chain, X509_free);
+ }
+ return S2N_SUCCESS;
+}
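The new helper gives the rest of the codebase a one-call way to release a whole certificate chain; a minimal usage sketch (the chain construction step is hypothetical):

    /* Hypothetical usage: frees every X509 in the stack plus the stack itself. */
    STACK_OF(X509) *chain = sk_X509_new_null();
    POSIX_ENSURE_REF(chain);
    /* ... push parsed certificates onto the stack ... */
    POSIX_GUARD(s2n_openssl_x509_stack_pop_free(&chain));
    chain = NULL; /* the helper leaves the caller's pointer untouched */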
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.h b/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.h
index 812b7233ba..b27a7f5a92 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_openssl_x509.h
@@ -15,8 +15,11 @@
#pragma once
+#include <stdint.h>
#include <openssl/x509.h>
#include "utils/s2n_safety.h"
DEFINE_POINTER_CLEANUP_FUNC(X509*, X509_free);
+
+int s2n_openssl_x509_stack_pop_free(STACK_OF(X509) **cert_chain);
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_pkey.c b/contrib/restricted/aws/s2n/crypto/s2n_pkey.c
index 69c0c25178..377eb2585e 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_pkey.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_pkey.c
@@ -14,8 +14,8 @@
*/
#include <openssl/evp.h>
-#include <crypto/s2n_openssl_evp.h>
-#include <crypto/s2n_openssl_x509.h>
+#include "crypto/s2n_openssl_evp.h"
+#include "crypto/s2n_openssl_x509.h"
#include "error/s2n_errno.h"
#include "crypto/s2n_rsa_pss.h"
@@ -51,26 +51,26 @@ int s2n_pkey_setup_for_type(struct s2n_pkey *pkey, s2n_pkey_type pkey_type)
return s2n_rsa_pss_pkey_init(pkey);
case S2N_PKEY_TYPE_SENTINEL:
case S2N_PKEY_TYPE_UNKNOWN:
- S2N_ERROR(S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ POSIX_BAIL(S2N_ERR_CERT_TYPE_UNSUPPORTED);
}
- S2N_ERROR(S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ POSIX_BAIL(S2N_ERR_CERT_TYPE_UNSUPPORTED);
}
int s2n_pkey_check_key_exists(const struct s2n_pkey *pkey)
{
- notnull_check(pkey->pkey);
- notnull_check(pkey->check_key);
+ POSIX_ENSURE_REF(pkey->pkey);
+ POSIX_ENSURE_REF(pkey->check_key);
return pkey->check_key(pkey);
}
S2N_RESULT s2n_pkey_size(const struct s2n_pkey *pkey, uint32_t *size_out)
{
- ENSURE_REF(pkey);
- ENSURE_REF(pkey->size);
- ENSURE_REF(size_out);
+ RESULT_ENSURE_REF(pkey);
+ RESULT_ENSURE_REF(pkey->size);
+ RESULT_ENSURE_REF(size_out);
- GUARD_RESULT(pkey->size(pkey, size_out));
+ RESULT_GUARD(pkey->size(pkey, size_out));
return S2N_RESULT_OK;
}
@@ -78,7 +78,7 @@ S2N_RESULT s2n_pkey_size(const struct s2n_pkey *pkey, uint32_t *size_out)
int s2n_pkey_sign(const struct s2n_pkey *pkey, s2n_signature_algorithm sig_alg,
struct s2n_hash_state *digest, struct s2n_blob *signature)
{
- notnull_check(pkey->sign);
+ POSIX_ENSURE_REF(pkey->sign);
return pkey->sign(pkey, sig_alg, digest, signature);
}
@@ -86,29 +86,29 @@ int s2n_pkey_sign(const struct s2n_pkey *pkey, s2n_signature_algorithm sig_alg,
int s2n_pkey_verify(const struct s2n_pkey *pkey, s2n_signature_algorithm sig_alg,
struct s2n_hash_state *digest, struct s2n_blob *signature)
{
- notnull_check(pkey);
- notnull_check(pkey->verify);
+ POSIX_ENSURE_REF(pkey);
+ POSIX_ENSURE_REF(pkey->verify);
return pkey->verify(pkey, sig_alg, digest, signature);
}
int s2n_pkey_encrypt(const struct s2n_pkey *pkey, struct s2n_blob *in, struct s2n_blob *out)
{
- notnull_check(pkey->encrypt);
+ POSIX_ENSURE_REF(pkey->encrypt);
return pkey->encrypt(pkey, in, out);
}
int s2n_pkey_decrypt(const struct s2n_pkey *pkey, struct s2n_blob *in, struct s2n_blob *out)
{
- notnull_check(pkey->decrypt);
+ POSIX_ENSURE_REF(pkey->decrypt);
return pkey->decrypt(pkey, in, out);
}
int s2n_pkey_match(const struct s2n_pkey *pub_key, const struct s2n_pkey *priv_key)
{
- notnull_check(pub_key->match);
+ POSIX_ENSURE_REF(pub_key->match);
S2N_ERROR_IF(pub_key->match != priv_key->match, S2N_ERR_KEY_MISMATCH);
@@ -118,7 +118,7 @@ int s2n_pkey_match(const struct s2n_pkey *pub_key, const struct s2n_pkey *priv_k
int s2n_pkey_free(struct s2n_pkey *key)
{
if (key != NULL && key->free != NULL) {
- GUARD(key->free(key));
+ POSIX_GUARD(key->free(key));
}
if (key->pkey != NULL) {
@@ -141,7 +141,7 @@ int s2n_asn1der_to_private_key(struct s2n_pkey *priv_key, struct s2n_blob *asn1d
/* If key parsing is successful, d2i_AutoPrivateKey increments *key_to_parse to the byte following the parsed data */
uint32_t parsed_len = key_to_parse - asn1der->data;
if (parsed_len != asn1der->size) {
- S2N_ERROR(S2N_ERR_DECODE_PRIVATE_KEY);
+ POSIX_BAIL(S2N_ERR_DECODE_PRIVATE_KEY);
}
/* Initialize s2n_pkey according to key type */
@@ -171,7 +171,7 @@ int s2n_asn1der_to_private_key(struct s2n_pkey *priv_key, struct s2n_blob *asn1d
ret = s2n_evp_pkey_to_ecdsa_private_key(&priv_key->key.ecdsa_key, evp_private_key);
break;
default:
- S2N_ERROR(S2N_ERR_DECODE_PRIVATE_KEY);
+ POSIX_BAIL(S2N_ERR_DECODE_PRIVATE_KEY);
}
priv_key->pkey = evp_private_key;
@@ -195,7 +195,7 @@ int s2n_asn1der_to_public_key_and_type(struct s2n_pkey *pub_key, s2n_pkey_type *
/* Some TLS clients in the wild send extra trailing bytes after the Certificate.
* Allow this in s2n for backwards compatibility with existing clients. */
uint32_t trailing_bytes = asn1der->size - parsed_len;
- ENSURE_POSIX(trailing_bytes <= S2N_MAX_ALLOWED_CERT_TRAILING_BYTES, S2N_ERR_DECODE_CERTIFICATE);
+ POSIX_ENSURE(trailing_bytes <= S2N_MAX_ALLOWED_CERT_TRAILING_BYTES, S2N_ERR_DECODE_CERTIFICATE);
DEFER_CLEANUP(EVP_PKEY *evp_public_key = X509_get_pubkey(cert), EVP_PKEY_free_pointer);
S2N_ERROR_IF(evp_public_key == NULL, S2N_ERR_DECODE_CERTIFICATE);
@@ -230,7 +230,7 @@ int s2n_asn1der_to_public_key_and_type(struct s2n_pkey *pub_key, s2n_pkey_type *
*pkey_type_out = S2N_PKEY_TYPE_ECDSA;
break;
default:
- S2N_ERROR(S2N_ERR_DECODE_CERTIFICATE);
+ POSIX_BAIL(S2N_ERR_DECODE_CERTIFICATE);
}
pub_key->pkey = evp_public_key;
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_rsa.c b/contrib/restricted/aws/s2n/crypto/s2n_rsa.c
index cfce3054bf..164496f93f 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_rsa.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_rsa.c
@@ -22,6 +22,7 @@
#include "crypto/s2n_drbg.h"
#include "crypto/s2n_hash.h"
#include "crypto/s2n_pkey.h"
+#include "crypto/s2n_evp_signing.h"
#include "crypto/s2n_rsa_signing.h"
#include "error/s2n_errno.h"
#include "stuffer/s2n_stuffer.h"
@@ -37,24 +38,24 @@ static S2N_RESULT s2n_rsa_modulus_check(RSA *rsa)
const BIGNUM *n = NULL;
/* RSA still owns the memory for n */
RSA_get0_key(rsa, &n, NULL, NULL);
- ENSURE_REF(n);
+ RESULT_ENSURE_REF(n);
#else
- ENSURE_REF(rsa->n);
+ RESULT_ENSURE_REF(rsa->n);
#endif
return S2N_RESULT_OK;
}
static S2N_RESULT s2n_rsa_encrypted_size(const struct s2n_pkey *key, uint32_t *size_out)
{
- ENSURE_REF(key);
- ENSURE_REF(size_out);
+ RESULT_ENSURE_REF(key);
+ RESULT_ENSURE_REF(size_out);
const struct s2n_rsa_key *rsa_key = &key->key.rsa_key;
- ENSURE_REF(rsa_key->rsa);
- GUARD_RESULT(s2n_rsa_modulus_check(rsa_key->rsa));
+ RESULT_ENSURE_REF(rsa_key->rsa);
+ RESULT_GUARD(s2n_rsa_modulus_check(rsa_key->rsa));
const int size = RSA_size(rsa_key->rsa);
- GUARD_AS_RESULT(size);
+ RESULT_GUARD_POSIX(size);
*size_out = size;
return S2N_RESULT_OK;
@@ -69,7 +70,7 @@ static int s2n_rsa_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig
case S2N_SIGNATURE_RSA_PSS_RSAE:
return s2n_rsa_pss_sign(priv, digest, signature);
default:
- S2N_ERROR(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
}
return S2N_SUCCESS;
@@ -84,7 +85,7 @@ static int s2n_rsa_verify(const struct s2n_pkey *pub, s2n_signature_algorithm si
case S2N_SIGNATURE_RSA_PSS_RSAE:
return s2n_rsa_pss_verify(pub, digest, signature);
default:
- S2N_ERROR(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
}
return S2N_SUCCESS;
@@ -93,7 +94,7 @@ static int s2n_rsa_verify(const struct s2n_pkey *pub, s2n_signature_algorithm si
static int s2n_rsa_encrypt(const struct s2n_pkey *pub, struct s2n_blob *in, struct s2n_blob *out)
{
uint32_t size = 0;
- GUARD_AS_POSIX(s2n_rsa_encrypted_size(pub, &size));
+ POSIX_GUARD_RESULT(s2n_rsa_encrypted_size(pub, &size));
S2N_ERROR_IF(out->size < size, S2N_ERR_NOMEM);
const s2n_rsa_public_key *key = &pub->key.rsa_key;
@@ -109,12 +110,12 @@ static int s2n_rsa_decrypt(const struct s2n_pkey *priv, struct s2n_blob *in, str
unsigned char intermediate[ 4096 ];
uint32_t expected_size = 0;
- GUARD_AS_POSIX(s2n_rsa_encrypted_size(priv, &expected_size));
+ POSIX_GUARD_RESULT(s2n_rsa_encrypted_size(priv, &expected_size));
S2N_ERROR_IF(expected_size > sizeof(intermediate), S2N_ERR_NOMEM);
S2N_ERROR_IF(out->size > sizeof(intermediate), S2N_ERR_NOMEM);
- GUARD_AS_POSIX(s2n_get_public_random_data(out));
+ POSIX_GUARD_RESULT(s2n_get_public_random_data(out));
const s2n_rsa_private_key *key = &priv->key.rsa_key;
int r = RSA_private_decrypt(in->size, ( unsigned char * )in->data, intermediate, key->rsa, RSA_NO_PADDING);
@@ -134,13 +135,13 @@ static int s2n_rsa_keys_match(const struct s2n_pkey *pub, const struct s2n_pkey
plain_in.size = sizeof(plain_inpad);
enc.data = encpad;
- GUARD_AS_POSIX(s2n_rsa_encrypted_size(pub, &enc.size));
- lte_check(enc.size, sizeof(encpad));
- GUARD(s2n_rsa_encrypt(pub, &plain_in, &enc));
+ POSIX_GUARD_RESULT(s2n_rsa_encrypted_size(pub, &enc.size));
+ POSIX_ENSURE_LTE(enc.size, sizeof(encpad));
+ POSIX_GUARD(s2n_rsa_encrypt(pub, &plain_in, &enc));
plain_out.data = plain_outpad;
plain_out.size = sizeof(plain_outpad);
- GUARD(s2n_rsa_decrypt(priv, &enc, &plain_out));
+ POSIX_GUARD(s2n_rsa_decrypt(priv, &enc, &plain_out));
S2N_ERROR_IF(memcmp(plain_in.data, plain_out.data, plain_in.size), S2N_ERR_KEY_MISMATCH);
@@ -161,7 +162,7 @@ static int s2n_rsa_key_free(struct s2n_pkey *pkey)
static int s2n_rsa_check_key_exists(const struct s2n_pkey *pkey)
{
const struct s2n_rsa_key *rsa_key = &pkey->key.rsa_key;
- notnull_check(rsa_key->rsa);
+ POSIX_ENSURE_REF(rsa_key->rsa);
return 0;
}
@@ -193,6 +194,7 @@ int s2n_rsa_pkey_init(struct s2n_pkey *pkey)
pkey->match = &s2n_rsa_keys_match;
pkey->free = &s2n_rsa_key_free;
pkey->check_key = &s2n_rsa_check_key_exists;
+ POSIX_GUARD_RESULT(s2n_evp_signing_set_pkey_overrides(pkey));
return 0;
}
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_rsa.h b/contrib/restricted/aws/s2n/crypto/s2n_rsa.h
index 3d153eaf3d..3bba4ff863 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_rsa.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_rsa.h
@@ -16,7 +16,7 @@
#pragma once
#include <stdint.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include <openssl/rsa.h>
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.c b/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.c
index 98fbd6ad23..da034d6ad3 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.c
@@ -21,6 +21,7 @@
#include "stuffer/s2n_stuffer.h"
+#include "crypto/s2n_evp_signing.h"
#include "crypto/s2n_hash.h"
#include "crypto/s2n_openssl.h"
#include "crypto/s2n_rsa.h"
@@ -43,12 +44,12 @@ int s2n_is_rsa_pss_certs_supported()
static S2N_RESULT s2n_rsa_pss_size(const struct s2n_pkey *key, uint32_t *size_out)
{
- ENSURE_REF(key);
- ENSURE_REF(size_out);
+ RESULT_ENSURE_REF(key);
+ RESULT_ENSURE_REF(size_out);
/* For more info, see: https://www.openssl.org/docs/man1.1.0/man3/EVP_PKEY_size.html */
const int size = EVP_PKEY_size(key->pkey);
- GUARD_AS_RESULT(size);
+ RESULT_GUARD_POSIX(size);
*size_out = size;
return S2N_RESULT_OK;
@@ -68,7 +69,7 @@ static int s2n_rsa_is_private_key(RSA *rsa_key)
int s2n_rsa_pss_key_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg,
struct s2n_hash_state *digest, struct s2n_blob *signature_out)
{
- notnull_check(priv);
+ POSIX_ENSURE_REF(priv);
sig_alg_check(sig_alg, S2N_SIGNATURE_RSA_PSS_PSS);
/* Not Possible to Sign with Public Key */
@@ -80,7 +81,7 @@ int s2n_rsa_pss_key_sign(const struct s2n_pkey *priv, s2n_signature_algorithm si
int s2n_rsa_pss_key_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg,
struct s2n_hash_state *digest, struct s2n_blob *signature_in)
{
- notnull_check(pub);
+ POSIX_ENSURE_REF(pub);
sig_alg_check(sig_alg, S2N_SIGNATURE_RSA_PSS_PSS);
/* Using Private Key to Verify means the public/private keys were likely swapped, and likely indicates a bug. */
@@ -93,22 +94,22 @@ static int s2n_rsa_pss_validate_sign_verify_match(const struct s2n_pkey *pub, co
{
/* Generate a random blob to sign and verify */
s2n_stack_blob(random_data, RSA_PSS_SIGN_VERIFY_RANDOM_BLOB_SIZE, RSA_PSS_SIGN_VERIFY_RANDOM_BLOB_SIZE);
- GUARD_AS_POSIX(s2n_get_private_random_data(&random_data));
+ POSIX_GUARD_RESULT(s2n_get_private_random_data(&random_data));
/* Sign/Verify API's only accept Hashes, so hash our Random Data */
DEFER_CLEANUP(struct s2n_hash_state sign_hash = {0}, s2n_hash_free);
DEFER_CLEANUP(struct s2n_hash_state verify_hash = {0}, s2n_hash_free);
- GUARD(s2n_hash_new(&sign_hash));
- GUARD(s2n_hash_new(&verify_hash));
- GUARD(s2n_hash_init(&sign_hash, S2N_HASH_SHA256));
- GUARD(s2n_hash_init(&verify_hash, S2N_HASH_SHA256));
- GUARD(s2n_hash_update(&sign_hash, random_data.data, random_data.size));
- GUARD(s2n_hash_update(&verify_hash, random_data.data, random_data.size));
+ POSIX_GUARD(s2n_hash_new(&sign_hash));
+ POSIX_GUARD(s2n_hash_new(&verify_hash));
+ POSIX_GUARD(s2n_hash_init(&sign_hash, S2N_HASH_SHA256));
+ POSIX_GUARD(s2n_hash_init(&verify_hash, S2N_HASH_SHA256));
+ POSIX_GUARD(s2n_hash_update(&sign_hash, random_data.data, random_data.size));
+ POSIX_GUARD(s2n_hash_update(&verify_hash, random_data.data, random_data.size));
/* Sign and Verify the Hash of the Random Blob */
s2n_stack_blob(signature_data, RSA_PSS_SIGN_VERIFY_SIGNATURE_SIZE, RSA_PSS_SIGN_VERIFY_SIGNATURE_SIZE);
- GUARD(s2n_rsa_pss_key_sign(priv, S2N_SIGNATURE_RSA_PSS_PSS, &sign_hash, &signature_data));
- GUARD(s2n_rsa_pss_key_verify(pub, S2N_SIGNATURE_RSA_PSS_PSS, &verify_hash, &signature_data));
+ POSIX_GUARD(s2n_rsa_pss_key_sign(priv, S2N_SIGNATURE_RSA_PSS_PSS, &sign_hash, &signature_data));
+ POSIX_GUARD(s2n_rsa_pss_key_verify(pub, S2N_SIGNATURE_RSA_PSS_PSS, &verify_hash, &signature_data));
return 0;
}
@@ -124,11 +125,11 @@ static int s2n_rsa_validate_params_equal(const RSA *pub, const RSA *priv)
RSA_get0_key(priv, &priv_val_n, &priv_val_e, NULL);
if (pub_val_e == NULL || priv_val_e == NULL) {
- S2N_ERROR(S2N_ERR_KEY_CHECK);
+ POSIX_BAIL(S2N_ERR_KEY_CHECK);
}
if (pub_val_n == NULL || priv_val_n == NULL) {
- S2N_ERROR(S2N_ERR_KEY_CHECK);
+ POSIX_BAIL(S2N_ERR_KEY_CHECK);
}
S2N_ERROR_IF(BN_cmp(pub_val_e, priv_val_e) != 0, S2N_ERR_KEY_MISMATCH);
@@ -139,8 +140,8 @@ static int s2n_rsa_validate_params_equal(const RSA *pub, const RSA *priv)
static int s2n_rsa_validate_params_match(const struct s2n_pkey *pub, const struct s2n_pkey *priv)
{
- notnull_check(pub);
- notnull_check(priv);
+ POSIX_ENSURE_REF(pub);
+ POSIX_ENSURE_REF(priv);
/* OpenSSL Documentation Links:
* - https://www.openssl.org/docs/manmaster/man3/EVP_PKEY_get0_RSA.html
@@ -149,10 +150,10 @@ static int s2n_rsa_validate_params_match(const struct s2n_pkey *pub, const struc
RSA *pub_rsa_key = pub->key.rsa_key.rsa;
RSA *priv_rsa_key = priv->key.rsa_key.rsa;
- notnull_check(pub_rsa_key);
- notnull_check(priv_rsa_key);
+ POSIX_ENSURE_REF(pub_rsa_key);
+ POSIX_ENSURE_REF(priv_rsa_key);
- GUARD(s2n_rsa_validate_params_equal(pub_rsa_key, priv_rsa_key));
+ POSIX_GUARD(s2n_rsa_validate_params_equal(pub_rsa_key, priv_rsa_key));
return 0;
}
@@ -160,15 +161,15 @@ static int s2n_rsa_validate_params_match(const struct s2n_pkey *pub, const struc
static int s2n_rsa_pss_keys_match(const struct s2n_pkey *pub, const struct s2n_pkey *priv)
{
- notnull_check(pub);
- notnull_check(pub->pkey);
- notnull_check(priv);
- notnull_check(priv->pkey);
+ POSIX_ENSURE_REF(pub);
+ POSIX_ENSURE_REF(pub->pkey);
+ POSIX_ENSURE_REF(priv);
+ POSIX_ENSURE_REF(priv->pkey);
- GUARD(s2n_rsa_validate_params_match(pub, priv));
+ POSIX_GUARD(s2n_rsa_validate_params_match(pub, priv));
/* Validate that verify(sign(message)) for a random message is verified correctly */
- GUARD(s2n_rsa_pss_validate_sign_verify_match(pub, priv));
+ POSIX_GUARD(s2n_rsa_pss_validate_sign_verify_match(pub, priv));
return 0;
}
@@ -193,7 +194,7 @@ int s2n_evp_pkey_to_rsa_pss_public_key(struct s2n_rsa_key *rsa_key, EVP_PKEY *pk
int s2n_evp_pkey_to_rsa_pss_private_key(struct s2n_rsa_key *rsa_key, EVP_PKEY *pkey)
{
RSA *priv_rsa_key = EVP_PKEY_get0_RSA(pkey);
- notnull_check(priv_rsa_key);
+ POSIX_ENSURE_REF(priv_rsa_key);
/* Documentation: https://www.openssl.org/docs/man1.1.1/man3/RSA_check_key.html */
S2N_ERROR_IF(!s2n_rsa_is_private_key(priv_rsa_key), S2N_ERR_KEY_MISMATCH);
@@ -201,7 +202,7 @@ int s2n_evp_pkey_to_rsa_pss_private_key(struct s2n_rsa_key *rsa_key, EVP_PKEY *p
/* Check that the mandatory properties of a RSA Private Key are valid.
* - Documentation: https://www.openssl.org/docs/man1.1.1/man3/RSA_check_key.html
*/
- GUARD_OSSL(RSA_check_key(priv_rsa_key), S2N_ERR_KEY_CHECK);
+ POSIX_GUARD_OSSL(RSA_check_key(priv_rsa_key), S2N_ERR_KEY_CHECK);
rsa_key->rsa = priv_rsa_key;
return 0;
@@ -209,7 +210,7 @@ int s2n_evp_pkey_to_rsa_pss_private_key(struct s2n_rsa_key *rsa_key, EVP_PKEY *p
int s2n_rsa_pss_pkey_init(struct s2n_pkey *pkey)
{
- GUARD(s2n_rsa_pkey_init(pkey));
+ POSIX_GUARD(s2n_rsa_pkey_init(pkey));
pkey->size = &s2n_rsa_pss_size;
pkey->sign = &s2n_rsa_pss_key_sign;
@@ -223,6 +224,7 @@ int s2n_rsa_pss_pkey_init(struct s2n_pkey *pkey)
pkey->match = &s2n_rsa_pss_keys_match;
pkey->free = &s2n_rsa_pss_key_free;
+ POSIX_GUARD_RESULT(s2n_evp_signing_set_pkey_overrides(pkey));
return 0;
}
@@ -230,17 +232,17 @@ int s2n_rsa_pss_pkey_init(struct s2n_pkey *pkey)
int s2n_evp_pkey_to_rsa_pss_public_key(struct s2n_rsa_key *rsa_pss_key, EVP_PKEY *pkey)
{
- S2N_ERROR(S2N_RSA_PSS_NOT_SUPPORTED);
+ POSIX_BAIL(S2N_RSA_PSS_NOT_SUPPORTED);
}
int s2n_evp_pkey_to_rsa_pss_private_key(struct s2n_rsa_key *rsa_pss_key, EVP_PKEY *pkey)
{
- S2N_ERROR(S2N_RSA_PSS_NOT_SUPPORTED);
+ POSIX_BAIL(S2N_RSA_PSS_NOT_SUPPORTED);
}
int s2n_rsa_pss_pkey_init(struct s2n_pkey *pkey)
{
- S2N_ERROR(S2N_RSA_PSS_NOT_SUPPORTED);
+ POSIX_BAIL(S2N_RSA_PSS_NOT_SUPPORTED);
}
#endif
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.h b/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.h
index 70516551fb..82431986f3 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_rsa_pss.h
@@ -16,7 +16,8 @@
#pragma once
#include <stdint.h>
-#include <s2n.h>
+#include "api/s2n.h"
+#include <openssl/bn.h>
#include "crypto/s2n_openssl.h"
#include "crypto/s2n_rsa.h"
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.c b/contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.c
index 313565380f..57a31172b7 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.c
@@ -34,7 +34,8 @@ static int s2n_hash_alg_to_NID[] = {
[S2N_HASH_SHA224] = NID_sha224,
[S2N_HASH_SHA256] = NID_sha256,
[S2N_HASH_SHA384] = NID_sha384,
- [S2N_HASH_SHA512] = NID_sha512 };
+ [S2N_HASH_SHA512] = NID_sha512
+};
int s2n_hash_NID_type(s2n_hash_algorithm alg, int *out)
{
@@ -48,46 +49,63 @@ int s2n_hash_NID_type(s2n_hash_algorithm alg, int *out)
*out = s2n_hash_alg_to_NID[alg];
break;
default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
}
return 0;
}
-int s2n_rsa_pkcs1v15_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature)
+int s2n_rsa_pkcs1v15_sign_digest(const struct s2n_pkey *priv, s2n_hash_algorithm hash_alg,
+ struct s2n_blob *digest, struct s2n_blob *signature)
{
- uint8_t digest_length;
- int NID_type;
- GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
- GUARD(s2n_hash_NID_type(digest->alg, &NID_type));
- lte_check(digest_length, S2N_MAX_DIGEST_LEN);
+ POSIX_ENSURE_REF(priv);
+ POSIX_ENSURE_REF(digest);
+ POSIX_ENSURE_REF(signature);
- const s2n_rsa_private_key *key = &priv->key.rsa_key;
+ int NID_type = 0;
+ POSIX_GUARD(s2n_hash_NID_type(hash_alg, &NID_type));
- uint8_t digest_out[S2N_MAX_DIGEST_LEN];
- GUARD(s2n_hash_digest(digest, digest_out, digest_length));
+ const s2n_rsa_private_key *key = &priv->key.rsa_key;
unsigned int signature_size = signature->size;
- GUARD_OSSL(RSA_sign(NID_type, digest_out, digest_length, signature->data, &signature_size, key->rsa), S2N_ERR_SIGN);
- S2N_ERROR_IF(signature_size > signature->size, S2N_ERR_SIZE_MISMATCH);
+ POSIX_GUARD_OSSL(RSA_sign(NID_type, digest->data, digest->size, signature->data, &signature_size, key->rsa), S2N_ERR_SIGN);
+ POSIX_ENSURE(signature_size <= signature->size, S2N_ERR_SIZE_MISMATCH);
signature->size = signature_size;
- return 0;
+ return S2N_SUCCESS;
+}
+
+int s2n_rsa_pkcs1v15_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature)
+{
+ POSIX_ENSURE_REF(digest);
+
+ uint8_t digest_length = 0;
+ POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
+ POSIX_ENSURE_LTE(digest_length, S2N_MAX_DIGEST_LEN);
+
+ uint8_t digest_out[S2N_MAX_DIGEST_LEN] = { 0 };
+ POSIX_GUARD(s2n_hash_digest(digest, digest_out, digest_length));
+
+ struct s2n_blob digest_blob = { 0 };
+ POSIX_GUARD(s2n_blob_init(&digest_blob, digest_out, digest_length));
+ POSIX_GUARD(s2n_rsa_pkcs1v15_sign_digest(priv, digest->alg, &digest_blob, signature));
+
+ return S2N_SUCCESS;
}
int s2n_rsa_pkcs1v15_verify(const struct s2n_pkey *pub, struct s2n_hash_state *digest, struct s2n_blob *signature)
{
uint8_t digest_length;
int digest_NID_type;
- GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
- GUARD(s2n_hash_NID_type(digest->alg, &digest_NID_type));
- lte_check(digest_length, S2N_MAX_DIGEST_LEN);
+ POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
+ POSIX_GUARD(s2n_hash_NID_type(digest->alg, &digest_NID_type));
+ POSIX_ENSURE_LTE(digest_length, S2N_MAX_DIGEST_LEN);
const s2n_rsa_public_key *key = &pub->key.rsa_key;
uint8_t digest_out[S2N_MAX_DIGEST_LEN];
- GUARD(s2n_hash_digest(digest, digest_out, digest_length));
+ POSIX_GUARD(s2n_hash_digest(digest, digest_out, digest_length));
- GUARD_OSSL(RSA_verify(digest_NID_type, digest_out, digest_length, signature->data, signature->size, key->rsa), S2N_ERR_VERIFY_SIGNATURE);
+ POSIX_GUARD_OSSL(RSA_verify(digest_NID_type, digest_out, digest_length, signature->data, signature->size, key->rsa), S2N_ERR_VERIFY_SIGNATURE);
return 0;
}
@@ -100,33 +118,10 @@ int s2n_is_rsa_pss_signing_supported()
#if RSA_PSS_SIGNING_SUPPORTED
-const EVP_MD* s2n_hash_alg_to_evp_alg(s2n_hash_algorithm alg)
-{
- switch (alg) {
- case S2N_HASH_MD5_SHA1:
- return EVP_md5_sha1();
- case S2N_HASH_SHA1:
- return EVP_sha1();
- case S2N_HASH_SHA224:
- return EVP_sha224();
- case S2N_HASH_SHA256:
- return EVP_sha256();
- case S2N_HASH_SHA384:
- return EVP_sha384();
- case S2N_HASH_SHA512:
- return EVP_sha512();
- default:
- return NULL;
- }
-}
-
-/* On some versions of OpenSSL, "EVP_PKEY_CTX_set_signature_md()" is just a macro that casts digest_alg to "void*",
- * which fails to compile when the "-Werror=cast-qual" compiler flag is enabled. So we work around this OpenSSL
- * issue by turning off this compiler check for this one function with a cast through. */
static int s2n_evp_pkey_ctx_set_rsa_signature_digest(EVP_PKEY_CTX *ctx, const EVP_MD* digest_alg)
{
- GUARD_OSSL(EVP_PKEY_CTX_set_signature_md(ctx,(EVP_MD*) (uintptr_t) digest_alg), S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
- GUARD_OSSL(EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, (EVP_MD*) (uintptr_t) digest_alg), S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
+ POSIX_GUARD_OSSL(S2N_EVP_PKEY_CTX_set_signature_md(ctx, digest_alg), S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, (EVP_MD*) (uintptr_t) digest_alg), S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
return 0;
}
@@ -135,72 +130,94 @@ static void s2n_evp_pkey_ctx_free(EVP_PKEY_CTX **ctx)
EVP_PKEY_CTX_free(*ctx);
}
-int s2n_rsa_pss_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature_out)
+int s2n_rsa_pss_sign_digest(const struct s2n_pkey *priv, s2n_hash_algorithm hash_alg,
+ struct s2n_blob *digest_in, struct s2n_blob *signature_out)
{
- notnull_check(priv);
-
- uint8_t digest_length;
- uint8_t digest_data[S2N_MAX_DIGEST_LEN];
- GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
- GUARD(s2n_hash_digest(digest, digest_data, digest_length));
+ POSIX_ENSURE_REF(priv);
+ POSIX_ENSURE_REF(digest_in);
+ POSIX_ENSURE_REF(signature_out);
- const EVP_MD* digest_alg = s2n_hash_alg_to_evp_alg(digest->alg);
- notnull_check(digest_alg);
+ const EVP_MD* digest_alg = s2n_hash_alg_to_evp_md(hash_alg);
+ POSIX_ENSURE_REF(digest_alg);
/* For more info see: https://www.openssl.org/docs/manmaster/man3/EVP_PKEY_sign.html */
DEFER_CLEANUP(EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(priv->pkey, NULL), s2n_evp_pkey_ctx_free);
- notnull_check(ctx);
+ POSIX_ENSURE_REF(ctx);
size_t signature_len = signature_out->size;
- GUARD_OSSL(EVP_PKEY_sign_init(ctx), S2N_ERR_SIGN);
- GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_SIGN);
- GUARD(s2n_evp_pkey_ctx_set_rsa_signature_digest(ctx, digest_alg));
- GUARD_OSSL(EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, RSA_PSS_SALTLEN_DIGEST), S2N_ERR_SIGN);
+ POSIX_GUARD_OSSL(EVP_PKEY_sign_init(ctx), S2N_ERR_SIGN);
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_SIGN);
+ POSIX_GUARD(s2n_evp_pkey_ctx_set_rsa_signature_digest(ctx, digest_alg));
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, RSA_PSS_SALTLEN_DIGEST), S2N_ERR_SIGN);
/* Calling EVP_PKEY_sign() with NULL will only update the signature_len parameter so users can validate sizes. */
- GUARD_OSSL(EVP_PKEY_sign(ctx, NULL, &signature_len, digest_data, digest_length), S2N_ERR_SIGN);
- S2N_ERROR_IF(signature_len > signature_out->size, S2N_ERR_SIZE_MISMATCH);
+ POSIX_GUARD_OSSL(EVP_PKEY_sign(ctx, NULL, &signature_len, digest_in->data, digest_in->size), S2N_ERR_SIGN);
+ POSIX_ENSURE(signature_len <= signature_out->size, S2N_ERR_SIZE_MISMATCH);
- /* Actually sign the the digest */
- GUARD_OSSL(EVP_PKEY_sign(ctx, signature_out->data, &signature_len, digest_data, digest_length), S2N_ERR_SIGN);
+ /* Actually sign the digest */
+ POSIX_GUARD_OSSL(EVP_PKEY_sign(ctx, signature_out->data, &signature_len, digest_in->data, digest_in->size), S2N_ERR_SIGN);
signature_out->size = signature_len;
- return 0;
+ return S2N_SUCCESS;
+}
+
+int s2n_rsa_pss_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature_out)
+{
+ POSIX_ENSURE_REF(digest);
+
+ uint8_t digest_length = 0;
+ uint8_t digest_data[S2N_MAX_DIGEST_LEN] = { 0 };
+ POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
+ POSIX_GUARD(s2n_hash_digest(digest, digest_data, digest_length));
+
+ struct s2n_blob digest_blob = { 0 };
+ POSIX_GUARD(s2n_blob_init(&digest_blob, digest_data, digest_length));
+ POSIX_GUARD(s2n_rsa_pss_sign_digest(priv, digest->alg, &digest_blob, signature_out));
+
+ return S2N_SUCCESS;
}
int s2n_rsa_pss_verify(const struct s2n_pkey *pub, struct s2n_hash_state *digest, struct s2n_blob *signature_in)
{
- notnull_check(pub);
+ POSIX_ENSURE_REF(pub);
uint8_t digest_length;
uint8_t digest_data[S2N_MAX_DIGEST_LEN];
- GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
- GUARD(s2n_hash_digest(digest, digest_data, digest_length));
- const EVP_MD* digest_alg = s2n_hash_alg_to_evp_alg(digest->alg);
- notnull_check(digest_alg);
+ POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length));
+ POSIX_GUARD(s2n_hash_digest(digest, digest_data, digest_length));
+ const EVP_MD* digest_alg = s2n_hash_alg_to_evp_md(digest->alg);
+ POSIX_ENSURE_REF(digest_alg);
/* For more info see: https://www.openssl.org/docs/manmaster/man3/EVP_PKEY_verify.html */
DEFER_CLEANUP(EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(pub->pkey, NULL), s2n_evp_pkey_ctx_free);
- notnull_check(ctx);
+ POSIX_ENSURE_REF(ctx);
- GUARD_OSSL(EVP_PKEY_verify_init(ctx), S2N_ERR_VERIFY_SIGNATURE);
- GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_SIGN);
- GUARD(s2n_evp_pkey_ctx_set_rsa_signature_digest(ctx, digest_alg));
- GUARD_OSSL(EVP_PKEY_verify(ctx, signature_in->data, signature_in->size, digest_data, digest_length), S2N_ERR_VERIFY_SIGNATURE);
+ POSIX_GUARD_OSSL(EVP_PKEY_verify_init(ctx), S2N_ERR_VERIFY_SIGNATURE);
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_VERIFY_SIGNATURE);
+ POSIX_GUARD(s2n_evp_pkey_ctx_set_rsa_signature_digest(ctx, digest_alg));
+ POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, RSA_PSS_SALTLEN_DIGEST), S2N_ERR_VERIFY_SIGNATURE);
- return 0;
+ POSIX_GUARD_OSSL(EVP_PKEY_verify(ctx, signature_in->data, signature_in->size,
+ digest_data, digest_length), S2N_ERR_VERIFY_SIGNATURE);
+ return S2N_SUCCESS;
}
#else
+int s2n_rsa_pss_sign_digest(const struct s2n_pkey *priv, s2n_hash_algorithm hash_alg,
+ struct s2n_blob *digest_in, struct s2n_blob *signature_out)
+{
+ POSIX_BAIL(S2N_RSA_PSS_NOT_SUPPORTED);
+}
+
int s2n_rsa_pss_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature_out)
{
- S2N_ERROR(S2N_RSA_PSS_NOT_SUPPORTED);
+ POSIX_BAIL(S2N_RSA_PSS_NOT_SUPPORTED);
}
int s2n_rsa_pss_verify(const struct s2n_pkey *pub, struct s2n_hash_state *digest, struct s2n_blob *signature_in)
{
- S2N_ERROR(S2N_RSA_PSS_NOT_SUPPORTED);
+ POSIX_BAIL(S2N_RSA_PSS_NOT_SUPPORTED);
}
#endif
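The reorganization above splits each signing routine into a hash-consuming wrapper and a digest-blob core (s2n_rsa_pkcs1v15_sign_digest, s2n_rsa_pss_sign_digest), so code that already holds a raw digest can skip the s2n_hash_state plumbing. A hedged sketch of the digest path, assuming priv and signature were prepared elsewhere and the digest is SHA-256:

    /* Hypothetical caller with a precomputed SHA-256 digest. */
    uint8_t digest_bytes[SHA256_DIGEST_LENGTH] = { 0 }; /* filled in elsewhere */
    struct s2n_blob digest_blob = { 0 };
    POSIX_GUARD(s2n_blob_init(&digest_blob, digest_bytes, sizeof(digest_bytes)));

    /* signature->data must already point at RSA_size(rsa) writable bytes */
    POSIX_GUARD(s2n_rsa_pkcs1v15_sign_digest(priv, S2N_HASH_SHA256, &digest_blob, signature));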
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.h b/contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.h
index 6013768a96..bf14928426 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_rsa_signing.h
@@ -15,14 +15,14 @@
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include "utils/s2n_blob.h"
#include "crypto/s2n_openssl.h"
#include "crypto/s2n_rsa.h"
/* Check for libcrypto 1.1 for RSA PSS Signing and EV_Key usage */
-#if S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 1) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_IS_AWSLC)
+#if (S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 1) || defined(OPENSSL_IS_AWSLC)) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL)
#define RSA_PSS_SIGNING_SUPPORTED 1
#else
#define RSA_PSS_SIGNING_SUPPORTED 0
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_sequence.c b/contrib/restricted/aws/s2n/crypto/s2n_sequence.c
index 2211653817..611d7ac5b4 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_sequence.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_sequence.c
@@ -25,7 +25,8 @@
int s2n_increment_sequence_number(struct s2n_blob *sequence_number)
{
- for (int i = sequence_number->size - 1; i >= 0; i--) {
+ for (uint32_t j = sequence_number->size; j > 0; j--) {
+ uint32_t i = j - 1;
sequence_number->data[i] += 1;
if (sequence_number->data[i]) {
break;
@@ -45,13 +46,13 @@ int s2n_increment_sequence_number(struct s2n_blob *sequence_number)
int s2n_sequence_number_to_uint64(struct s2n_blob *sequence_number, uint64_t *output)
{
- notnull_check(sequence_number);
+ POSIX_ENSURE_REF(sequence_number);
uint8_t shift = 0;
*output = 0;
- for (int i = sequence_number->size - 1; i >= 0; i--) {
- *output += ((uint64_t) sequence_number->data[i]) << shift;
+ for (uint32_t i = sequence_number->size; i > 0; i--) {
+ *output += ((uint64_t) sequence_number->data[i-1]) << shift;
shift += SEQUENCE_NUMBER_POWER;
}
return S2N_SUCCESS;
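Both loops were rewritten to count down with unsigned indices (sidestepping the signed/unsigned comparison in the old for (int i = size - 1; ...) form) while preserving the big-endian reading of the sequence number. A worked trace on a hypothetical 3-byte value, assuming SEQUENCE_NUMBER_POWER is 8:

    /* s2n_sequence_number_to_uint64 over { 0x01, 0x02, 0x03 }:
     *   i=3: output += 0x03 << 0    -> 0x000003
     *   i=2: output += 0x02 << 8    -> 0x000203
     *   i=1: output += 0x01 << 16   -> 0x010203
     * The first byte is the most significant, i.e. network byte order. */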
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_signature.h b/contrib/restricted/aws/s2n/crypto/s2n_signature.h
index 0206578a5a..b097e93afb 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_signature.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_signature.h
@@ -16,14 +16,14 @@
#include "tls/s2n_tls_parameters.h"
-#define sig_alg_check(a, b) do { if ( (a) != (b) ) { S2N_ERROR(S2N_ERR_INVALID_SIGNATURE_ALGORITHM); } } while(0)
+#define sig_alg_check(a, b) do { if ( (a) != (b) ) { POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHM); } } while(0)
typedef enum {
- S2N_SIGNATURE_ANONYMOUS = TLS_SIGNATURE_ALGORITHM_ANONYMOUS,
- S2N_SIGNATURE_RSA = TLS_SIGNATURE_ALGORITHM_RSA,
- S2N_SIGNATURE_ECDSA = TLS_SIGNATURE_ALGORITHM_ECDSA,
+ S2N_SIGNATURE_ANONYMOUS = S2N_TLS_SIGNATURE_ANONYMOUS,
+ S2N_SIGNATURE_RSA = S2N_TLS_SIGNATURE_RSA,
+ S2N_SIGNATURE_ECDSA = S2N_TLS_SIGNATURE_ECDSA,
/* Use Private Range for RSA PSS */
- S2N_SIGNATURE_RSA_PSS_RSAE = TLS_SIGNATURE_ALGORITHM_PRIVATE,
+ S2N_SIGNATURE_RSA_PSS_RSAE = S2N_TLS_SIGNATURE_RSA_PSS_RSAE,
S2N_SIGNATURE_RSA_PSS_PSS
} s2n_signature_algorithm;
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_null.c b/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_null.c
index ef09ea0a0f..6e4681782b 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_null.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_null.c
@@ -30,7 +30,7 @@ static int s2n_stream_cipher_null_endecrypt(struct s2n_session_key *key, struct
S2N_ERROR_IF(out->size < in->size, S2N_ERR_SIZE_MISMATCH);
if (in->data != out->data) {
- memcpy_check(out->data, in->data, out->size);
+ POSIX_CHECKED_MEMCPY(out->data, in->data, out->size);
}
return 0;
}
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c b/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c
index 78c5ea3d8b..9cc41ff677 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c
@@ -28,10 +28,10 @@ static uint8_t s2n_stream_cipher_rc4_available()
static int s2n_stream_cipher_rc4_encrypt(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(out->size, in->size);
+ POSIX_ENSURE_GTE(out->size, in->size);
int len = out->size;
- GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
S2N_ERROR_IF(len != in->size, S2N_ERR_ENCRYPT);
@@ -40,10 +40,10 @@ static int s2n_stream_cipher_rc4_encrypt(struct s2n_session_key *key, struct s2n
static int s2n_stream_cipher_rc4_decrypt(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out)
{
- gte_check(out->size, in->size);
+ POSIX_ENSURE_GTE(out->size, in->size);
int len = out->size;
- GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
+ POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
S2N_ERROR_IF(len != in->size, S2N_ERR_ENCRYPT);
@@ -52,16 +52,16 @@ static int s2n_stream_cipher_rc4_decrypt(struct s2n_session_key *key, struct s2n
static int s2n_stream_cipher_rc4_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 16);
- GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_rc4(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_ENSURE_EQ(in->size, 16);
+ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_rc4(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
static int s2n_stream_cipher_rc4_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
{
- eq_check(in->size, 16);
- GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_rc4(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
+ POSIX_ENSURE_EQ(in->size, 16);
+ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_rc4(), NULL, in->data, NULL), S2N_ERR_KEY_INIT);
return 0;
}
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.c b/contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.c
index 1b472937ad..83d5e64951 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.c
@@ -34,7 +34,7 @@
* when the relevant TLS 1.3 features are worked on.
*
* [x] binder_key
- * [ ] client_early_traffic_secret
+ * [x] client_early_traffic_secret
* [ ] early_exporter_master_secret
* [x] client_handshake_traffic_secret
* [x] server_handshake_traffic_secret
@@ -94,35 +94,19 @@ S2N_BLOB_LABEL(s2n_tls13_label_application_traffic_secret_update, "traffic upd")
static const struct s2n_blob zero_length_blob = { .data = NULL, .size = 0 };
-/* Message transcript hash based on selected HMAC algorithm */
-static int s2n_tls13_transcript_message_hash(struct s2n_tls13_keys *keys, const struct s2n_blob *message, struct s2n_blob *message_digest)
-{
- notnull_check(keys);
- notnull_check(message);
- notnull_check(message_digest);
-
- DEFER_CLEANUP(struct s2n_hash_state hash_state, s2n_hash_free);
- GUARD(s2n_hash_new(&hash_state));
- GUARD(s2n_hash_init(&hash_state, keys->hash_algorithm));
- GUARD(s2n_hash_update(&hash_state, message->data, message->size));
- GUARD(s2n_hash_digest(&hash_state, message_digest->data, message_digest->size));
-
- return 0;
-}
-
/*
* Initializes the tls13_keys struct
*/
int s2n_tls13_keys_init(struct s2n_tls13_keys *keys, s2n_hmac_algorithm alg)
{
- notnull_check(keys);
+ POSIX_ENSURE_REF(keys);
keys->hmac_algorithm = alg;
- GUARD(s2n_hmac_hash_alg(alg, &keys->hash_algorithm));
- GUARD(s2n_hash_digest_size(keys->hash_algorithm, &keys->size));
- GUARD(s2n_blob_init(&keys->extract_secret, keys->extract_secret_bytes, keys->size));
- GUARD(s2n_blob_init(&keys->derive_secret, keys->derive_secret_bytes, keys->size));
- GUARD(s2n_hmac_new(&keys->hmac));
+ POSIX_GUARD(s2n_hmac_hash_alg(alg, &keys->hash_algorithm));
+ POSIX_GUARD(s2n_hash_digest_size(keys->hash_algorithm, &keys->size));
+ POSIX_GUARD(s2n_blob_init(&keys->extract_secret, keys->extract_secret_bytes, keys->size));
+ POSIX_GUARD(s2n_blob_init(&keys->derive_secret, keys->derive_secret_bytes, keys->size));
+ POSIX_GUARD(s2n_hmac_new(&keys->hmac));
return 0;
}
@@ -131,169 +115,26 @@ int s2n_tls13_keys_init(struct s2n_tls13_keys *keys, s2n_hmac_algorithm alg)
* Frees any allocation
*/
int s2n_tls13_keys_free(struct s2n_tls13_keys *keys) {
- notnull_check(keys);
+ POSIX_ENSURE_REF(keys);
- GUARD(s2n_hmac_free(&keys->hmac));
+ POSIX_GUARD(s2n_hmac_free(&keys->hmac));
return 0;
}
/*
- * Derives binder_key from PSK.
- */
-int s2n_tls13_derive_binder_key(struct s2n_tls13_keys *keys, struct s2n_psk *psk)
-{
- notnull_check(keys);
- notnull_check(psk);
-
- struct s2n_blob *early_secret = &keys->extract_secret;
- struct s2n_blob *binder_key = &keys->derive_secret;
-
- /* Extract the early secret */
- GUARD(s2n_hkdf_extract(&keys->hmac, keys->hmac_algorithm, &zero_length_blob,
- &psk->secret, early_secret));
-
- /* Choose the correct label for the psk type */
- const struct s2n_blob *label_blob;
- if (psk->type == S2N_PSK_TYPE_EXTERNAL) {
- label_blob = &s2n_tls13_label_external_psk_binder_key;
- } else {
- label_blob = &s2n_tls13_label_resumption_psk_binder_key;
- }
-
- /* Derive the binder_key */
- s2n_tls13_key_blob(message_digest, keys->size);
- GUARD(s2n_tls13_transcript_message_hash(keys, &zero_length_blob, &message_digest));
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, early_secret,
- label_blob, &message_digest, binder_key));
-
- return S2N_SUCCESS;
-}
-
-/*
- * Derives early secrets
- */
-int s2n_tls13_derive_early_secrets(struct s2n_tls13_keys *keys, struct s2n_psk *psk)
-{
- notnull_check(keys);
-
- /* Early Secret */
- if (psk == NULL) {
- /* in 1-RTT, PSK is 0-filled of key length */
- s2n_tls13_key_blob(psk_ikm, keys->size);
-
- GUARD(s2n_hkdf_extract(&keys->hmac, keys->hmac_algorithm, &zero_length_blob, &psk_ikm, &keys->extract_secret));
- } else {
- /* Sanity check that an early secret exists */
- ne_check(psk->early_secret.size, 0);
- keys->extract_secret = psk->early_secret;
- }
-
- /* client_early_traffic_secret and early_exporter_master_secret can be derived here */
-
- /* derive next secret */
- s2n_tls13_key_blob(message_digest, keys->size);
- GUARD(s2n_tls13_transcript_message_hash(keys, &zero_length_blob, &message_digest));
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, &keys->extract_secret,
- &s2n_tls13_label_derived_secret, &message_digest, &keys->derive_secret));
-
- return S2N_SUCCESS;
-}
-
-/*
- * Derives handshake secrets
- */
-int s2n_tls13_derive_handshake_secrets(struct s2n_tls13_keys *keys,
- const struct s2n_blob *ecdhe,
- struct s2n_hash_state *client_server_hello_hash,
- struct s2n_blob *client_secret,
- struct s2n_blob *server_secret)
-{
- notnull_check(keys);
- notnull_check(ecdhe);
- notnull_check(client_server_hello_hash);
- notnull_check(client_secret);
- notnull_check(server_secret);
-
- /* Handshake Secret */
- GUARD(s2n_hkdf_extract(&keys->hmac, keys->hmac_algorithm, &keys->derive_secret, ecdhe, &keys->extract_secret));
-
- s2n_tls13_key_blob(message_digest, keys->size);
-
- /* copy the hash */
- DEFER_CLEANUP(struct s2n_hash_state hkdf_hash_copy, s2n_hash_free);
- GUARD(s2n_hash_new(&hkdf_hash_copy));
- GUARD(s2n_hash_copy(&hkdf_hash_copy, client_server_hello_hash));
- s2n_hash_digest(&hkdf_hash_copy, message_digest.data, message_digest.size);
-
- /* produce client + server traffic secrets */
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, &keys->extract_secret,
- &s2n_tls13_label_client_handshake_traffic_secret, &message_digest, client_secret));
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, &keys->extract_secret,
- &s2n_tls13_label_server_handshake_traffic_secret, &message_digest, server_secret));
-
- /* derive next secret */
- GUARD(s2n_tls13_transcript_message_hash(keys, &zero_length_blob, &message_digest));
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, &keys->extract_secret,
- &s2n_tls13_label_derived_secret, &message_digest, &keys->derive_secret));
-
- return 0;
-}
-
-int s2n_tls13_extract_master_secret(struct s2n_tls13_keys *keys)
-{
- s2n_tls13_key_blob(empty_key, keys->size);
-
- /* Extract master secret from derived secret */
- GUARD(s2n_hkdf_extract(&keys->hmac, keys->hmac_algorithm, &keys->derive_secret, &empty_key, &keys->extract_secret));
-
- return S2N_SUCCESS;
-}
-
-int s2n_tls13_derive_application_secret(struct s2n_tls13_keys *keys, struct s2n_hash_state *hashes, struct s2n_blob *secret_blob, s2n_mode mode)
-{
- notnull_check(keys);
- notnull_check(hashes);
- notnull_check(secret_blob);
-
- const struct s2n_blob *label_blob;
- if (mode == S2N_CLIENT) {
- label_blob = &s2n_tls13_label_client_application_traffic_secret;
- } else {
- label_blob = &s2n_tls13_label_server_application_traffic_secret;
- }
-
- /* Sanity check that input hash is of expected type */
- S2N_ERROR_IF(keys->hash_algorithm != hashes->alg, S2N_ERR_HASH_INVALID_ALGORITHM);
-
- s2n_tls13_key_blob(message_digest, keys->size);
-
- /* copy the hashes into the message_digest */
- DEFER_CLEANUP(struct s2n_hash_state hkdf_hash_copy, s2n_hash_free);
- GUARD(s2n_hash_new(&hkdf_hash_copy));
- GUARD(s2n_hash_copy(&hkdf_hash_copy, hashes));
- GUARD(s2n_hash_digest(&hkdf_hash_copy, message_digest.data, message_digest.size));
-
- /* Derive traffic secret from master secret */
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, &keys->extract_secret,
- label_blob, &message_digest, secret_blob));
-
- return S2N_SUCCESS;
-}
-
-/*
* Derive Traffic Key and IV based on input secret
*/
int s2n_tls13_derive_traffic_keys(struct s2n_tls13_keys *keys, struct s2n_blob *secret, struct s2n_blob *key, struct s2n_blob *iv)
{
- notnull_check(keys);
- notnull_check(secret);
- notnull_check(key);
- notnull_check(iv);
+ POSIX_ENSURE_REF(keys);
+ POSIX_ENSURE_REF(secret);
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE_REF(iv);
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret,
+ POSIX_GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret,
&s2n_tls13_label_traffic_secret_key, &zero_length_blob, key));
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret,
+ POSIX_GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret,
&s2n_tls13_label_traffic_secret_iv, &zero_length_blob, iv));
return 0;
}
@@ -304,7 +145,7 @@ int s2n_tls13_derive_traffic_keys(struct s2n_tls13_keys *keys, struct s2n_blob *
*/
int s2n_tls13_derive_finished_key(struct s2n_tls13_keys *keys, struct s2n_blob *secret_key, struct s2n_blob *output_finish_key)
{
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret_key, &s2n_tls13_label_finished, &zero_length_blob, output_finish_key));
+ POSIX_GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret_key, &s2n_tls13_label_finished, &zero_length_blob, output_finish_key));
return 0;
}
@@ -316,18 +157,10 @@ int s2n_tls13_derive_finished_key(struct s2n_tls13_keys *keys, struct s2n_blob *
*/
int s2n_tls13_calculate_finished_mac(struct s2n_tls13_keys *keys, struct s2n_blob *finished_key, struct s2n_hash_state *hash_state, struct s2n_blob *finished_verify)
{
- /* Set up a blob to contain hash */
s2n_tls13_key_blob(transcript_hash, keys->size);
-
- /* Make a copy of the hash state */
- DEFER_CLEANUP(struct s2n_hash_state hash_state_copy, s2n_hash_free);
- GUARD(s2n_hash_new(&hash_state_copy));
- GUARD(s2n_hash_copy(&hash_state_copy, hash_state));
- GUARD(s2n_hash_digest(&hash_state_copy, transcript_hash.data, transcript_hash.size));
-
- GUARD(s2n_hkdf_extract(&keys->hmac, keys->hmac_algorithm, finished_key, &transcript_hash, finished_verify));
-
- return 0;
+ POSIX_GUARD(s2n_hash_digest(hash_state, transcript_hash.data, transcript_hash.size));
+ POSIX_GUARD(s2n_hkdf_extract(&keys->hmac, keys->hmac_algorithm, finished_key, &transcript_hash, finished_verify));
+ return S2N_SUCCESS;
}
/*
@@ -335,50 +168,26 @@ int s2n_tls13_calculate_finished_mac(struct s2n_tls13_keys *keys, struct s2n_blo
*/
int s2n_tls13_update_application_traffic_secret(struct s2n_tls13_keys *keys, struct s2n_blob *old_secret, struct s2n_blob *new_secret)
{
- notnull_check(keys);
- notnull_check(old_secret);
- notnull_check(new_secret);
+ POSIX_ENSURE_REF(keys);
+ POSIX_ENSURE_REF(old_secret);
+ POSIX_ENSURE_REF(new_secret);
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, old_secret,
+ POSIX_GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, old_secret,
&s2n_tls13_label_application_traffic_secret_update, &zero_length_blob, new_secret));
return 0;
}
-int s2n_tls13_derive_resumption_master_secret(struct s2n_tls13_keys *keys, struct s2n_hash_state *hashes, struct s2n_blob *secret_blob)
-{
- notnull_check(keys);
- notnull_check(hashes);
- notnull_check(secret_blob);
-
- /* Sanity check that input hash is of expected type */
- ENSURE_POSIX(keys->hash_algorithm == hashes->alg, S2N_ERR_HASH_INVALID_ALGORITHM);
-
- s2n_tls13_key_blob(message_digest, keys->size);
-
- /* Copy the hashes into the message_digest */
- DEFER_CLEANUP(struct s2n_hash_state hkdf_hash_copy, s2n_hash_free);
- GUARD(s2n_hash_new(&hkdf_hash_copy));
- GUARD(s2n_hash_copy(&hkdf_hash_copy, hashes));
- GUARD(s2n_hash_digest(&hkdf_hash_copy, message_digest.data, message_digest.size));
-
- /* Derive master session resumption from master secret */
- GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, &keys->extract_secret,
- &s2n_tls13_label_resumption_master_secret, &message_digest, secret_blob));
-
- return S2N_SUCCESS;
-}
-
S2N_RESULT s2n_tls13_derive_session_ticket_secret(struct s2n_tls13_keys *keys, struct s2n_blob *resumption_secret,
struct s2n_blob *ticket_nonce, struct s2n_blob *secret_blob)
{
- ENSURE_REF(keys);
- ENSURE_REF(resumption_secret);
- ENSURE_REF(ticket_nonce);
- ENSURE_REF(secret_blob);
+ RESULT_ENSURE_REF(keys);
+ RESULT_ENSURE_REF(resumption_secret);
+ RESULT_ENSURE_REF(ticket_nonce);
+ RESULT_ENSURE_REF(secret_blob);
/* Derive session ticket secret from master session resumption secret */
- GUARD_AS_RESULT(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, resumption_secret,
+ RESULT_GUARD_POSIX(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, resumption_secret,
&s2n_tls13_label_session_ticket_secret, ticket_nonce, secret_blob));
return S2N_RESULT_OK;
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.h b/contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.h
index cf4e6185f4..48ef1bf01a 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.h
+++ b/contrib/restricted/aws/s2n/crypto/s2n_tls13_keys.h
@@ -27,6 +27,12 @@
#include "utils/s2n_mem.h"
#include "utils/s2n_safety.h"
+/* Unlike TLS1.2 secrets, TLS1.3 secret lengths vary depending
+ * on the hash algorithm used to calculate them.
+ * We allocate enough space for the largest possible secret.
+ * At the moment, that is 48 bytes for S2N_HASH_SHA384 and
+ * matches the TLS1.2 secret length.
+ */
#define S2N_TLS13_SECRET_MAX_LEN SHA384_DIGEST_LENGTH
struct s2n_tls13_keys {
@@ -63,6 +69,8 @@ extern const struct s2n_blob s2n_tls13_label_server_application_traffic_secret;
extern const struct s2n_blob s2n_tls13_label_exporter_master_secret;
extern const struct s2n_blob s2n_tls13_label_resumption_master_secret;
+extern const struct s2n_blob s2n_tls13_label_finished;
+
/* Traffic secret labels */
extern const struct s2n_blob s2n_tls13_label_traffic_secret_key;
@@ -73,21 +81,10 @@ extern const struct s2n_blob s2n_tls13_label_traffic_secret_iv;
int s2n_tls13_keys_init(struct s2n_tls13_keys *handshake, s2n_hmac_algorithm alg);
int s2n_tls13_keys_free(struct s2n_tls13_keys *keys);
-int s2n_tls13_derive_binder_key(struct s2n_tls13_keys *keys, struct s2n_psk *psk);
-int s2n_tls13_derive_early_secrets(struct s2n_tls13_keys *handshake, struct s2n_psk *psk);
-int s2n_tls13_derive_handshake_secrets(struct s2n_tls13_keys *handshake,
- const struct s2n_blob *ecdhe,
- struct s2n_hash_state *client_server_hello_hash,
- struct s2n_blob *client_secret,
- struct s2n_blob *server_secret);
-int s2n_tls13_extract_master_secret(struct s2n_tls13_keys *handshake);
-int s2n_tls13_derive_application_secret(struct s2n_tls13_keys *handshake, struct s2n_hash_state *hashes, struct s2n_blob *secret_blob, s2n_mode mode);
int s2n_tls13_derive_traffic_keys(struct s2n_tls13_keys *handshake, struct s2n_blob *secret, struct s2n_blob *key, struct s2n_blob *iv);
int s2n_tls13_derive_finished_key(struct s2n_tls13_keys *keys, struct s2n_blob *secret_key, struct s2n_blob *output_finish_key);
int s2n_tls13_calculate_finished_mac(struct s2n_tls13_keys *keys, struct s2n_blob *finished_key, struct s2n_hash_state *hash_state, struct s2n_blob *finished_verify);
int s2n_tls13_update_application_traffic_secret(struct s2n_tls13_keys *keys, struct s2n_blob *old_secret, struct s2n_blob *new_secret);
-
-int s2n_tls13_derive_resumption_master_secret(struct s2n_tls13_keys *keys, struct s2n_hash_state *hashes, struct s2n_blob *secret_blob);
S2N_RESULT s2n_tls13_derive_session_ticket_secret(struct s2n_tls13_keys *keys, struct s2n_blob *resumption_secret,
struct s2n_blob *ticket_nonce, struct s2n_blob *secret_blob);
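After the pruning above, the public flow of this header reduces to: initialize the key schedule for an HMAC algorithm, expand a secret into a traffic key and IV, and free. A minimal sketch under the assumption of an AES-128-GCM record cipher (16-byte key, 12-byte IV) and a traffic_secret blob obtained elsewhere:

    /* Illustrative only: expand one secret into record-protection key material. */
    struct s2n_tls13_keys keys = { 0 };
    POSIX_GUARD(s2n_tls13_keys_init(&keys, S2N_HMAC_SHA256));

    uint8_t key_bytes[16] = { 0 };
    uint8_t iv_bytes[12] = { 0 };
    struct s2n_blob key = { 0 };
    struct s2n_blob iv = { 0 };
    POSIX_GUARD(s2n_blob_init(&key, key_bytes, sizeof(key_bytes)));
    POSIX_GUARD(s2n_blob_init(&iv, iv_bytes, sizeof(iv_bytes)));

    POSIX_GUARD(s2n_tls13_derive_traffic_keys(&keys, &traffic_secret, &key, &iv));
    POSIX_GUARD(s2n_tls13_keys_free(&keys));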
diff --git a/contrib/restricted/aws/s2n/error/s2n_errno.c b/contrib/restricted/aws/s2n/error/s2n_errno.c
index 1fcc1b6634..a875611e8b 100644
--- a/contrib/restricted/aws/s2n/error/s2n_errno.c
+++ b/contrib/restricted/aws/s2n/error/s2n_errno.c
@@ -20,7 +20,7 @@
#include <string.h>
#include "error/s2n_errno.h"
-#include <s2n.h>
+#include "api/s2n.h"
#include "utils/s2n_map.h"
#include "utils/s2n_safety.h"
@@ -176,7 +176,8 @@ static const char *no_such_error = "Internal s2n error";
ERR_ENTRY(S2N_ERR_ASYNC_CALLBACK_FAILED, "Callback associated with async private keys function has failed") \
ERR_ENTRY(S2N_ERR_ASYNC_MORE_THAN_ONE, "Only one asynchronous operation can be in-progress at the same time") \
ERR_ENTRY(S2N_ERR_NO_ALERT, "No Alert present") \
- ERR_ENTRY(S2N_ERR_CLIENT_MODE, "operation not allowed in client mode") \
+ ERR_ENTRY(S2N_ERR_SERVER_MODE, "Operation not allowed in server mode") \
+ ERR_ENTRY(S2N_ERR_CLIENT_MODE, "Operation not allowed in client mode") \
ERR_ENTRY(S2N_ERR_CLIENT_MODE_DISABLED, "client connections not allowed") \
ERR_ENTRY(S2N_ERR_TOO_MANY_CERTIFICATES, "only 1 certificate is supported in client mode") \
ERR_ENTRY(S2N_ERR_TOO_MANY_SIGNATURE_SCHEMES, "Max supported length of SignatureAlgorithms/SignatureSchemes list is 32") \
@@ -193,7 +194,7 @@ static const char *no_such_error = "Internal s2n error";
ERR_ENTRY(S2N_ERR_NUM_DEFAULT_CERTIFICATES, "exceeded max default certificates or provided no default") \
ERR_ENTRY(S2N_ERR_MULTIPLE_DEFAULT_CERTIFICATES_PER_AUTH_TYPE, "setting multiple default certificates per auth type is not allowed") \
ERR_ENTRY(S2N_ERR_INVALID_CIPHER_PREFERENCES, "Invalid Cipher Preferences version") \
- ERR_ENTRY(S2N_ERR_APPLICATION_PROTOCOL_TOO_LONG, "Application protocol name is too long") \
+ ERR_ENTRY(S2N_ERR_INVALID_APPLICATION_PROTOCOL, "The supplied application protocol name is invalid") \
ERR_ENTRY(S2N_ERR_KEY_MISMATCH, "public and private key do not match") \
ERR_ENTRY(S2N_ERR_SEND_SIZE, "Retried s2n_send() size is invalid") \
ERR_ENTRY(S2N_ERR_CORK_SET_ON_UNMANAGED, "Attempt to set connection cork management on unmanaged IO") \
@@ -236,10 +237,9 @@ static const char *no_such_error = "Internal s2n error";
ERR_ENTRY(S2N_ERR_INVALID_SECURITY_POLICY, "Invalid security policy") \
ERR_ENTRY(S2N_ERR_INVALID_KEM_PREFERENCES, "Invalid kem preferences version") \
ERR_ENTRY(S2N_ERR_INVALID_PARSED_EXTENSIONS, "Invalid parsed extension data") \
- ERR_ENTRY(S2N_ERR_ASYNC_ALREADY_PERFORMED, "Async operation was already performed, cannot perfom it again") \
+ ERR_ENTRY(S2N_ERR_ASYNC_ALREADY_PERFORMED, "Async operation was already performed, cannot perform it again") \
ERR_ENTRY(S2N_ERR_ASYNC_NOT_PERFORMED, "Async operation is not performed, cannot apply its result") \
ERR_ENTRY(S2N_ERR_ASYNC_WRONG_CONNECTION, "Async private key operation can only be consumed by connection which initiated it") \
- ERR_ENTRY(S2N_ERR_ASYNC_APPLY_WHILE_INVOKING, "Async private key operation cannot consumed inside async pkey callback") \
ERR_ENTRY(S2N_ERR_ASYNC_ALREADY_APPLIED, "Async operation was already applied to connection, cannot apply it again") \
ERR_ENTRY(S2N_ERR_INVALID_HELLO_RETRY, "Invalid hello retry request") \
ERR_ENTRY(S2N_ERR_INVALID_STATE, "Invalid state, this is the result of invalid use of an API. Check the API documentation for the function that raised this error for more info") \
@@ -247,8 +247,27 @@ static const char *no_such_error = "Internal s2n error";
ERR_ENTRY(S2N_ERR_PQ_CRYPTO, "An error occurred in a post-quantum crypto function") \
ERR_ENTRY(S2N_ERR_PQ_DISABLED, "Post-quantum crypto is disabled") \
ERR_ENTRY(S2N_ERR_DUPLICATE_PSK_IDENTITIES, "The list of pre-shared keys provided contains duplicate psk identities") \
+ ERR_ENTRY(S2N_ERR_OFFERED_PSKS_TOO_LONG, "The total pre-shared key data is too long to send over the wire") \
+ ERR_ENTRY(S2N_ERR_INVALID_SESSION_TICKET, "Session ticket data is not valid") \
ERR_ENTRY(S2N_ERR_REENTRANCY, "Original execution must complete before method can be called again") \
-
+ ERR_ENTRY(S2N_ERR_INVALID_CERT_STATE, "Certificate validation entered an invalid state and is not able to continue") \
+ ERR_ENTRY(S2N_ERR_INVALID_EARLY_DATA_STATE, "Early data in invalid state") \
+ ERR_ENTRY(S2N_ERR_EARLY_DATA_NOT_ALLOWED, "Early data is not allowed by the connection") \
+ ERR_ENTRY(S2N_ERR_NO_CERT_FOUND, "Certificate not found") \
+ ERR_ENTRY(S2N_ERR_NO_PRIVATE_KEY, "Certificate found, but no corresponding private key") \
+ ERR_ENTRY(S2N_ERR_CERT_NOT_VALIDATED, "Certificate not validated") \
+ ERR_ENTRY(S2N_ERR_MAX_EARLY_DATA_SIZE, "Maximum early data bytes exceeded") \
+ ERR_ENTRY(S2N_ERR_EARLY_DATA_BLOCKED, "Blocked on early data") \
+ ERR_ENTRY(S2N_ERR_PSK_MODE, "Mixing resumption and external PSKs is not supported") \
+ ERR_ENTRY(S2N_ERR_X509_EXTENSION_VALUE_NOT_FOUND, "X509 extension value not found") \
+ ERR_ENTRY(S2N_ERR_INVALID_X509_EXTENSION_TYPE, "Invalid X509 extension type") \
+ ERR_ENTRY(S2N_ERR_INSUFFICIENT_MEM_SIZE, "The provided buffer size is not large enough to contain the output data. Try increasing the allocation size.") \
+ ERR_ENTRY(S2N_ERR_KEYING_MATERIAL_EXPIRED, "The lifetime of the connection keying material has exceeded the limit. Perform a new full handshake.") \
+ ERR_ENTRY(S2N_ERR_EARLY_DATA_TRIAL_DECRYPT, "Unable to decrypt rejected early data") \
+ ERR_ENTRY(S2N_ERR_PKEY_CTX_INIT, "Unable to initialize the libcrypto pkey context") \
+ ERR_ENTRY(S2N_ERR_FORK_DETECTION_INIT, "Fork detection initialization failed") \
+ ERR_ENTRY(S2N_ERR_RETRIEVE_FORK_GENERATION_NUMBER, "Retrieving fork generation number failed") \
+ ERR_ENTRY(S2N_ERR_SECRET_SCHEDULE_STATE, "Correct inputs to secret calculation not available") \
/* clang-format on */
#define ERR_STR_CASE(ERR, str) case ERR: return str;
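
The ERR_ENTRY additions above feed a table written in X-macro style: the list of (error, message) pairs appears once, and the ERR_STR_CASE helper defined just above expands each pair into a switch case for the error-to-string lookup. A reduced, self-contained sketch of that pattern with made-up error codes:

    #include <stdio.h>

    enum sketch_error { SKETCH_ERR_OK, SKETCH_ERR_IO, SKETCH_ERR_ALERT };

    /* The table is written once... */
    #define SKETCH_ERR_ENTRIES(ENTRY)            \
        ENTRY(SKETCH_ERR_OK,    "No error")      \
        ENTRY(SKETCH_ERR_IO,    "I/O failure")   \
        ENTRY(SKETCH_ERR_ALERT, "Peer alert")

    /* ...and expanded into switch cases for the string lookup. */
    #define SKETCH_ERR_STR_CASE(err, str) case err: return str;

    static const char *sketch_strerror(enum sketch_error err)
    {
        switch (err) {
            SKETCH_ERR_ENTRIES(SKETCH_ERR_STR_CASE)
            default: return "Unknown error";
        }
    }

    int main(void)
    {
        printf("%s\n", sketch_strerror(SKETCH_ERR_IO));
        return 0;
    }
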
@@ -368,7 +387,7 @@ int s2n_calculate_stacktrace(void)
}
int old_errno = errno;
- GUARD(s2n_free_stacktrace());
+ POSIX_GUARD(s2n_free_stacktrace());
void *array[MAX_BACKTRACE_DEPTH];
tl_stacktrace.trace_size = backtrace(array, MAX_BACKTRACE_DEPTH);
tl_stacktrace.trace = backtrace_symbols(array, tl_stacktrace.trace_size);
@@ -386,7 +405,7 @@ int s2n_print_stacktrace(FILE *fptr)
if (!s_s2n_stack_traces_enabled) {
fprintf(fptr, "%s\n%s\n",
"NOTE: Some details are omitted, run with S2N_PRINT_STACKTRACE=1 for a verbose backtrace.",
- "See https://github.com/awslabs/s2n/blob/main/docs/USAGE-GUIDE.md");
+ "See https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md");
return S2N_SUCCESS;
}
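
The stacktrace hunks above capture frames with glibc's backtrace facility and resolve them with backtrace_symbols. A standalone sketch of that pair of calls from <execinfo.h>, independent of s2n (linking with -rdynamic makes the symbol names more useful):

    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SKETCH_MAX_DEPTH 50

    static void sketch_print_backtrace(FILE *fptr)
    {
        void *frames[SKETCH_MAX_DEPTH];
        int depth = backtrace(frames, SKETCH_MAX_DEPTH);     /* capture return addresses */
        char **symbols = backtrace_symbols(frames, depth);   /* heap-allocated strings */
        if (symbols == NULL) {
            return;
        }
        for (int i = 0; i < depth; i++) {
            fprintf(fptr, "%s\n", symbols[i]);
        }
        free(symbols); /* one free() releases the whole array */
    }
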
diff --git a/contrib/restricted/aws/s2n/error/s2n_errno.h b/contrib/restricted/aws/s2n/error/s2n_errno.h
index 1c5afb8f2b..8efe44b705 100644
--- a/contrib/restricted/aws/s2n/error/s2n_errno.h
+++ b/contrib/restricted/aws/s2n/error/s2n_errno.h
@@ -15,9 +15,11 @@
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include <stdio.h>
#include <stdbool.h>
+#include "utils/s2n_ensure.h"
+
/*
* To easily retrieve error types, we split error values into two parts.
* The upper 6 bits describe the error type and the lower bits describe the value within the category.
@@ -56,6 +58,7 @@ typedef enum {
/* S2N_ERR_T_BLOCKED */
S2N_ERR_IO_BLOCKED = S2N_ERR_T_BLOCKED_START,
S2N_ERR_ASYNC_BLOCKED,
+ S2N_ERR_EARLY_DATA_BLOCKED,
S2N_ERR_T_BLOCKED_END,
/* S2N_ERR_T_ALERT */
@@ -119,6 +122,8 @@ typedef enum {
S2N_ERR_MISSING_EXTENSION,
S2N_ERR_UNSUPPORTED_EXTENSION,
S2N_ERR_DUPLICATE_EXTENSION,
+ S2N_ERR_MAX_EARLY_DATA_SIZE,
+ S2N_ERR_EARLY_DATA_TRIAL_DECRYPT,
S2N_ERR_T_PROTO_END,
/* S2N_ERR_T_INTERNAL */
@@ -197,13 +202,18 @@ typedef enum {
S2N_ERR_INVALID_PARSED_EXTENSIONS,
S2N_ERR_ASYNC_CALLBACK_FAILED,
S2N_ERR_ASYNC_MORE_THAN_ONE,
- S2N_ERR_INVALID_STATE,
S2N_ERR_PQ_CRYPTO,
S2N_ERR_PQ_DISABLED,
+ S2N_ERR_INVALID_CERT_STATE,
+ S2N_ERR_INVALID_EARLY_DATA_STATE,
+ S2N_ERR_PKEY_CTX_INIT,
+ S2N_ERR_FORK_DETECTION_INIT,
+ S2N_ERR_RETRIEVE_FORK_GENERATION_NUMBER,
S2N_ERR_T_INTERNAL_END,
/* S2N_ERR_T_USAGE */
S2N_ERR_NO_ALERT = S2N_ERR_T_USAGE_START,
+ S2N_ERR_SERVER_MODE,
S2N_ERR_CLIENT_MODE,
S2N_ERR_CLIENT_MODE_DISABLED,
S2N_ERR_TOO_MANY_CERTIFICATES,
@@ -221,7 +231,7 @@ typedef enum {
S2N_ERR_NUM_DEFAULT_CERTIFICATES,
S2N_ERR_MULTIPLE_DEFAULT_CERTIFICATES_PER_AUTH_TYPE,
S2N_ERR_INVALID_CIPHER_PREFERENCES,
- S2N_ERR_APPLICATION_PROTOCOL_TOO_LONG,
+ S2N_ERR_INVALID_APPLICATION_PROTOCOL,
S2N_ERR_KEY_MISMATCH,
S2N_ERR_SEND_SIZE,
S2N_ERR_CORK_SET_ON_UNMANAGED,
@@ -258,11 +268,23 @@ typedef enum {
S2N_ERR_ASYNC_ALREADY_PERFORMED,
S2N_ERR_ASYNC_NOT_PERFORMED,
S2N_ERR_ASYNC_WRONG_CONNECTION,
- S2N_ERR_ASYNC_APPLY_WHILE_INVOKING,
S2N_ERR_ASYNC_ALREADY_APPLIED,
S2N_ERR_UNSUPPORTED_WITH_QUIC,
S2N_ERR_DUPLICATE_PSK_IDENTITIES,
+ S2N_ERR_OFFERED_PSKS_TOO_LONG,
+ S2N_ERR_INVALID_SESSION_TICKET,
S2N_ERR_REENTRANCY,
+ S2N_ERR_INVALID_STATE,
+ S2N_ERR_EARLY_DATA_NOT_ALLOWED,
+ S2N_ERR_NO_CERT_FOUND,
+ S2N_ERR_CERT_NOT_VALIDATED,
+ S2N_ERR_NO_PRIVATE_KEY,
+ S2N_ERR_PSK_MODE,
+ S2N_ERR_X509_EXTENSION_VALUE_NOT_FOUND,
+ S2N_ERR_INVALID_X509_EXTENSION_TYPE,
+ S2N_ERR_INSUFFICIENT_MEM_SIZE,
+ S2N_ERR_KEYING_MATERIAL_EXPIRED,
+ S2N_ERR_SECRET_SCHEDULE_STATE,
S2N_ERR_T_USAGE_END,
} s2n_error;
@@ -273,41 +295,19 @@ extern __thread const char *s2n_debug_str;
#define STRING_(s) TO_STRING(s)
#define STRING__LINE__ STRING_(__LINE__)
-#define _S2N_DEBUG_LINE "Error encountered in " __FILE__ " line " STRING__LINE__
+#define _S2N_DEBUG_LINE "Error encountered in " __FILE__ ":" STRING__LINE__
#define _S2N_ERROR( x ) do { s2n_debug_str = _S2N_DEBUG_LINE; s2n_errno = ( x ); s2n_calculate_stacktrace(); } while (0)
-#define S2N_ERROR( x ) do { _S2N_ERROR( ( x ) ); return -1; } while (0)
#define S2N_ERROR_PRESERVE_ERRNO() do { return -1; } while (0)
-#define S2N_ERROR_PTR( x ) do { _S2N_ERROR( ( x ) ); return NULL; } while (0)
-#define S2N_ERROR_IF( cond , x ) do { if ( cond ) { S2N_ERROR( x ); }} while (0)
-#define S2N_ERROR_IF_PTR( cond , x ) do { if ( cond ) { S2N_ERROR_PTR( x ); }} while (0)
#define S2N_ERROR_IS_BLOCKING( x ) ( s2n_error_get_type(x) == S2N_ERR_T_BLOCKED )
-/**
- * Define function contracts.
- * When the code is being verified using CBMC these contracts are formally verified;
- * When the code is built in debug mode, they are checked as much as possible using assertions
- * When the code is built in production mode, non-fatal contracts are not checked.
- * Violations of the function contracts are undefined behaviour.
- */
-#ifdef CBMC
-# define S2N_MEM_IS_READABLE(base, len) (((len) == 0) || __CPROVER_r_ok((base), (len)))
-# define S2N_MEM_IS_WRITABLE(base, len) (((len) == 0) || __CPROVER_w_ok((base), (len)))
-#else
-/* the C runtime does not give a way to check these properties,
- * but we can at least check that the pointer is valid */
-# define S2N_MEM_IS_READABLE(base, len) (((len) == 0) || (base) != NULL)
-# define S2N_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base) != NULL)
-#endif /* CBMC */
+/* DEPRECATED: use POSIX_BAIL instead */
+#define S2N_ERROR( x ) do { _S2N_ERROR( ( x ) ); return -1; } while (0)
-#define S2N_OBJECT_PTR_IS_READABLE(ptr) ((ptr) != NULL)
-#define S2N_OBJECT_PTR_IS_WRITABLE(ptr) ((ptr) != NULL)
+/* DEPRECATED: use PTR_BAIL instead */
+#define S2N_ERROR_PTR( x ) do { _S2N_ERROR( ( x ) ); return NULL; } while (0)
-#define S2N_IMPLIES(a, b) (!(a) || (b))
-/**
- * If and only if (iff) is a biconditional logical connective between statements a and b.
- * Equivalent to (S2N_IMPLIES(a, b) && S2N_IMPLIES(b, a)).
- */
-#define S2N_IFF(a, b) (!!(a) == !!(b))
+/* DEPRECATED: use POSIX_ENSURE instead */
+#define S2N_ERROR_IF( cond , x ) do { if ( cond ) { S2N_ERROR( x ); }} while (0)
/** Calculate and print stacktraces */
struct s2n_stacktrace {
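
The deprecation notes above point S2N_ERROR, S2N_ERROR_PTR, and S2N_ERROR_IF at POSIX_BAIL, PTR_BAIL, and POSIX_ENSURE, and the rest of the patch swaps GUARD for POSIX_GUARD throughout: each macro records the error (or propagates a failure) and returns the failure value matching the enclosing function's return type. A simplified sketch of that shape, with hypothetical names rather than s2n's real definitions:

    static int sketch_last_error;

    #define SKETCH_POSIX_BAIL(err)         do { sketch_last_error = (err); return -1; } while (0)
    #define SKETCH_PTR_BAIL(err)           do { sketch_last_error = (err); return NULL; } while (0)
    #define SKETCH_POSIX_ENSURE(cond, err) do { if (!(cond)) { SKETCH_POSIX_BAIL(err); } } while (0)
    #define SKETCH_POSIX_GUARD(call)       do { if ((call) < 0) { return -1; } } while (0)

    static int sketch_read_byte(const unsigned char *buf, unsigned char *out)
    {
        SKETCH_POSIX_ENSURE(buf != NULL, 1);
        SKETCH_POSIX_ENSURE(out != NULL, 1);
        *out = buf[0];
        return 0;
    }

    static int sketch_read_two_bytes(const unsigned char *buf, unsigned char out[2])
    {
        SKETCH_POSIX_GUARD(sketch_read_byte(buf, &out[0]));
        SKETCH_POSIX_GUARD(sketch_read_byte(buf + 1, &out[1]));
        return 0;
    }
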
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/aes_ctr_prf.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/aes_ctr_prf.c
index 26c99bc80d..2f211010df 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/aes_ctr_prf.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/aes_ctr_prf.c
@@ -27,7 +27,7 @@ init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
bike_static_assert(sizeof(*seed) == sizeof(key.raw), seed_size_equals_ky_size);
memcpy(key.raw, seed->raw, sizeof(key.raw));
- GUARD(aes256_key_expansion(&s->ks_ptr, &key));
+ POSIX_GUARD(aes256_key_expansion(&s->ks_ptr, &key));
// Initialize buffer and counter
s->ctr.u.qw[0] = 0;
@@ -59,7 +59,7 @@ perform_aes(OUT uint8_t *ct, IN OUT aes_ctr_prf_state_t *s)
BIKE_ERROR(E_AES_OVER_USED);
}
- GUARD(aes256_enc(ct, s->ctr.u.bytes, &s->ks_ptr));
+ POSIX_GUARD(aes256_enc(ct, s->ctr.u.bytes, &s->ks_ptr));
s->ctr.u.qw[0]++;
s->rem_invokations--;
@@ -91,11 +91,11 @@ aes_ctr_prf(OUT uint8_t *a, IN OUT aes_ctr_prf_state_t *s, IN const uint32_t len
// Copy full AES blocks
while((len - idx) >= AES256_BLOCK_SIZE)
{
- GUARD(perform_aes(&a[idx], s));
+ POSIX_GUARD(perform_aes(&a[idx], s));
idx += AES256_BLOCK_SIZE;
}
- GUARD(perform_aes(s->buffer.u.bytes, s));
+ POSIX_GUARD(perform_aes(s->buffer.u.bytes, s));
// Copy the tail
s->pos = len - idx;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/bike_r1_kem.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/bike_r1_kem.c
index 21b0b6f5a3..ba43098837 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/bike_r1_kem.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/bike_r1_kem.c
@@ -78,18 +78,18 @@ encrypt(OUT ct_t *ct,
p_pk[1].val = pk->val[1];
DMSG(" Sampling m.\n");
- GUARD(sample_uniform_r_bits(&m.val, seed, NO_RESTRICTION));
+ POSIX_GUARD(sample_uniform_r_bits(&m.val, seed, NO_RESTRICTION));
DMSG(" Calculating the ciphertext.\n");
- GUARD(gf2x_mod_mul((uint64_t *)&p_ct[0], (uint64_t *)&m, (uint64_t *)&p_pk[0]));
- GUARD(gf2x_mod_mul((uint64_t *)&p_ct[1], (uint64_t *)&m, (uint64_t *)&p_pk[1]));
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&p_ct[0], (uint64_t *)&m, (uint64_t *)&p_pk[0]));
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&p_ct[1], (uint64_t *)&m, (uint64_t *)&p_pk[1]));
DMSG(" Addding Error to the ciphertext.\n");
- GUARD(
+ POSIX_GUARD(
gf2x_add(p_ct[0].val.raw, p_ct[0].val.raw, splitted_e->val[0].raw, R_SIZE));
- GUARD(
+ POSIX_GUARD(
gf2x_add(p_ct[1].val.raw, p_ct[1].val.raw, splitted_e->val[1].raw, R_SIZE));
// Copy the data outside
@@ -113,12 +113,12 @@ calc_pk(OUT pk_t *pk, IN const seed_t *g_seed, IN const pad_sk_t p_sk)
// Intialized padding to zero
DEFER_CLEANUP(padded_r_t g = {0}, padded_r_cleanup);
- GUARD(sample_uniform_r_bits(&g.val, g_seed, MUST_BE_ODD));
+ POSIX_GUARD(sample_uniform_r_bits(&g.val, g_seed, MUST_BE_ODD));
// Calculate (g0, g1) = (g*h1, g*h0)
- GUARD(gf2x_mod_mul((uint64_t *)&p_pk[0], (const uint64_t *)&g,
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&p_pk[0], (const uint64_t *)&g,
(const uint64_t *)&p_sk[1]));
- GUARD(gf2x_mod_mul((uint64_t *)&p_pk[1], (const uint64_t *)&g,
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&p_pk[1], (const uint64_t *)&g,
(const uint64_t *)&p_sk[0]));
// Copy the data to the output parameters.
@@ -156,7 +156,7 @@ get_ss(OUT ss_t *out, IN const e_t *e)
int
BIKE1_L1_R1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
{
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
// Convert to this implementation types
pk_t *l_pk = (pk_t *)pk;
@@ -177,14 +177,14 @@ BIKE1_L1_R1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
DMSG(" Calculating the secret key.\n");
// h0 and h1 use the same context
- GUARD(init_aes_ctr_prf_state(&h_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
+ POSIX_GUARD(init_aes_ctr_prf_state(&h_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
- GUARD(generate_sparse_rep((uint64_t *)&p_sk[0], l_sk.wlist[0].val, DV, R_BITS,
+ POSIX_GUARD(generate_sparse_rep((uint64_t *)&p_sk[0], l_sk.wlist[0].val, DV, R_BITS,
sizeof(p_sk[0]), &h_prf_state));
// Copy data
l_sk.bin[0] = p_sk[0].val;
- GUARD(generate_sparse_rep((uint64_t *)&p_sk[1], l_sk.wlist[1].val, DV, R_BITS,
+ POSIX_GUARD(generate_sparse_rep((uint64_t *)&p_sk[1], l_sk.wlist[1].val, DV, R_BITS,
sizeof(p_sk[1]), &h_prf_state));
// Copy data
@@ -192,7 +192,7 @@ BIKE1_L1_R1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
DMSG(" Calculating the public key.\n");
- GUARD(calc_pk(l_pk, &seeds.seed[1], p_sk));
+ POSIX_GUARD(calc_pk(l_pk, &seeds.seed[1], p_sk));
memcpy(sk, &l_sk, sizeof(l_sk));
@@ -214,7 +214,7 @@ BIKE1_L1_R1_crypto_kem_enc(OUT unsigned char * ct,
IN const unsigned char *pk)
{
DMSG(" Enter crypto_kem_enc.\n");
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
// Convert to this implementation types
const pk_t *l_pk = (const pk_t *)pk;
@@ -231,11 +231,11 @@ BIKE1_L1_R1_crypto_kem_enc(OUT unsigned char * ct,
// Random data generator
// Using first seed
- GUARD(init_aes_ctr_prf_state(&e_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
+ POSIX_GUARD(init_aes_ctr_prf_state(&e_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
DMSG(" Generating error.\n");
ALIGN(8) compressed_idx_t_t dummy;
- GUARD(generate_sparse_rep((uint64_t *)&e, dummy.val, T1, N_BITS, sizeof(e),
+ POSIX_GUARD(generate_sparse_rep((uint64_t *)&e, dummy.val, T1, N_BITS, sizeof(e),
&e_prf_state));
print("e: ", (uint64_t *)&e.val, sizeof(e) * 8);
@@ -250,7 +250,7 @@ BIKE1_L1_R1_crypto_kem_enc(OUT unsigned char * ct,
// Computing ct = enc(pk, e)
// Using second seed
DMSG(" Encrypting.\n");
- GUARD(encrypt(l_ct, l_pk, &seeds.seed[1], &splitted_e));
+ POSIX_GUARD(encrypt(l_ct, l_pk, &seeds.seed[1], &splitted_e));
DMSG(" Generating shared secret.\n");
get_ss(l_ss, &e.val);
@@ -269,7 +269,7 @@ BIKE1_L1_R1_crypto_kem_dec(OUT unsigned char * ss,
IN const unsigned char *sk)
{
DMSG(" Enter crypto_kem_dec.\n");
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
// Convert to this implementation types
const ct_t *l_ct = (const ct_t *)ct;
@@ -284,10 +284,10 @@ BIKE1_L1_R1_crypto_kem_dec(OUT unsigned char * ss,
DEFER_CLEANUP(e_t merged_e = {0}, e_cleanup);
DMSG(" Computing s.\n");
- GUARD(compute_syndrome(&syndrome, l_ct, &l_sk));
+ POSIX_GUARD(compute_syndrome(&syndrome, l_ct, &l_sk));
DMSG(" Decoding.\n");
- GUARD(decode(&e, &syndrome, l_ct, &l_sk));
+ POSIX_GUARD(decode(&e, &syndrome, l_ct, &l_sk));
// Check if the error weight equals T1
if(T1 != r_bits_vector_weight(&e.val[0]) + r_bits_vector_weight(&e.val[1]))
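
In the encrypt path above, each ciphertext half is the message multiplied by a public key half (gf2x_mod_mul) with an error block added on top (gf2x_add), all in BIKE's binary polynomial ring. Addition over GF(2) carries nothing, so the add step is a plain byte-wise XOR; a self-contained sketch of that step (the modular multiplication is far more involved and is not reproduced here):

    #include <stddef.h>
    #include <stdint.h>

    /* Add two polynomials over GF(2): addition is carry-free, i.e. XOR. */
    static void sketch_gf2x_add(uint8_t *res, const uint8_t *a, const uint8_t *b, size_t size)
    {
        for (size_t i = 0; i < size; i++) {
            res[i] = a[i] ^ b[i];
        }
    }
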
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/decode.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/decode.c
index 404c6377da..b455cd7e82 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/decode.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/decode.c
@@ -96,12 +96,12 @@ compute_syndrome(OUT syndrome_t *syndrome, IN const ct_t *ct, IN const sk_t *sk)
pad_ct[1].val = ct->val[1];
// Compute s = c0*h0 + c1*h1:
- GUARD(gf2x_mod_mul((uint64_t *)&pad_s[0], (uint64_t *)&pad_ct[0],
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&pad_s[0], (uint64_t *)&pad_ct[0],
(uint64_t *)&pad_sk[0]));
- GUARD(gf2x_mod_mul((uint64_t *)&pad_s[1], (uint64_t *)&pad_ct[1],
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&pad_s[1], (uint64_t *)&pad_ct[1],
(uint64_t *)&pad_sk[1]));
- GUARD(gf2x_add(pad_s[0].val.raw, pad_s[0].val.raw, pad_s[1].val.raw, R_SIZE));
+ POSIX_GUARD(gf2x_add(pad_s[0].val.raw, pad_s[0].val.raw, pad_s[1].val.raw, R_SIZE));
memcpy((uint8_t *)syndrome->qw, pad_s[0].val.raw, R_SIZE);
dup(syndrome);
@@ -118,13 +118,13 @@ recompute_syndrome(OUT syndrome_t *syndrome,
ct_t tmp_ct = *ct;
// Adapt the ciphertext
- GUARD(gf2x_add(tmp_ct.val[0].raw, tmp_ct.val[0].raw, splitted_e->val[0].raw,
+ POSIX_GUARD(gf2x_add(tmp_ct.val[0].raw, tmp_ct.val[0].raw, splitted_e->val[0].raw,
R_SIZE));
- GUARD(gf2x_add(tmp_ct.val[1].raw, tmp_ct.val[1].raw, splitted_e->val[1].raw,
+ POSIX_GUARD(gf2x_add(tmp_ct.val[1].raw, tmp_ct.val[1].raw, splitted_e->val[1].raw,
R_SIZE));
// Recompute the syndrome
- GUARD(compute_syndrome(syndrome, &tmp_ct, sk));
+ POSIX_GUARD(compute_syndrome(syndrome, &tmp_ct, sk));
return SUCCESS;
}
@@ -334,7 +334,7 @@ decode(OUT split_e_t *e,
DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
find_err1(e, &black_e, &gray_e, &s, sk->wlist, threshold);
- GUARD(recompute_syndrome(&s, ct, sk, e));
+ POSIX_GUARD(recompute_syndrome(&s, ct, sk, e));
#ifdef BGF_DECODER
if(iter >= 1)
{
@@ -346,14 +346,14 @@ decode(OUT split_e_t *e,
DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
find_err2(e, &black_e, &s, sk->wlist, ((DV + 1) / 2) + 1);
- GUARD(recompute_syndrome(&s, ct, sk, e));
+ POSIX_GUARD(recompute_syndrome(&s, ct, sk, e));
DMSG(" Weight of e: %lu\n",
r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
find_err2(e, &gray_e, &s, sk->wlist, ((DV + 1) / 2) + 1);
- GUARD(recompute_syndrome(&s, ct, sk, e));
+ POSIX_GUARD(recompute_syndrome(&s, ct, sk, e));
}
if(r_bits_vector_weight((r_t *)s.qw) > 0)
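
The decoder above keeps flipping bits until the syndrome weight reaches zero: r_bits_vector_weight is the Hamming weight of the syndrome bit vector, and the final check treats any non-zero weight as a decoding failure. A small portable sketch of such a weight count (real implementations typically use hardware popcounts):

    #include <stddef.h>
    #include <stdint.h>

    static uint64_t sketch_bit_vector_weight(const uint8_t *v, size_t size_bytes)
    {
        uint64_t weight = 0;
        for (size_t i = 0; i < size_bytes; i++) {
            uint8_t byte = v[i];
            while (byte != 0) {
                byte &= (uint8_t)(byte - 1); /* clear the lowest set bit */
                weight++;
            }
        }
        return weight;
    }
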
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/openssl_utils.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/openssl_utils.c
index 09e0af3fde..c80d3365cb 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/openssl_utils.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/openssl_utils.c
@@ -108,15 +108,15 @@ ossl_add(OUT uint8_t res_bin[R_SIZE],
BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
}
- GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
- GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
+ POSIX_GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
+ POSIX_GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
if(BN_GF2m_add(r, a, b) == 0)
{
BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
}
- GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
+ POSIX_GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
return SUCCESS;
}
@@ -176,10 +176,10 @@ cyclic_product(OUT uint8_t res_bin[R_SIZE],
BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
}
- GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
- GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
- GUARD(ossl_cyclic_product(r, a, b, bn_ctx));
- GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
+ POSIX_GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
+ POSIX_GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
+ POSIX_GUARD(ossl_cyclic_product(r, a, b, bn_ctx));
+ POSIX_GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
return SUCCESS;
}
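
ossl_add above round-trips through OpenSSL BIGNUMs: import the fixed-size buffers, add them as GF(2^m) polynomials with BN_GF2m_add (coefficient-wise XOR), and export the result. BN_bn2bin omits leading zero bytes, so a fixed-size export has to re-pad. A rough standalone sketch of that round trip, assuming an OpenSSL build that still ships the BN_GF2m_* routines:

    #include <openssl/bn.h>
    #include <stdint.h>
    #include <string.h>

    /* res = a ^ b interpreted as GF(2) polynomials, all r_size bytes long. Returns 1 on success. */
    static int sketch_ossl_add(uint8_t *res, const uint8_t *a_bin, const uint8_t *b_bin, size_t r_size)
    {
        int ok = 0;
        int r_bytes = 0;
        BIGNUM *a = BN_bin2bn(a_bin, (int)r_size, NULL);
        BIGNUM *b = BN_bin2bn(b_bin, (int)r_size, NULL);
        BIGNUM *r = BN_new();
        if (a == NULL || b == NULL || r == NULL) { goto cleanup; }

        if (BN_GF2m_add(r, a, b) != 1) { goto cleanup; }

        /* BN_bn2bin writes big-endian with no leading zeros: pad on the left. */
        r_bytes = BN_num_bytes(r);
        if (r_bytes > (int)r_size) { goto cleanup; }
        memset(res, 0, r_size);
        BN_bn2bin(r, res + (r_size - (size_t)r_bytes));

        ok = 1;
    cleanup:
        BN_free(a);
        BN_free(b);
        BN_free(r);
        return ok;
    }
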
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.c
index 3686338fad..d08fa5eea7 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.c
@@ -20,7 +20,7 @@ get_rand_mod_len(OUT uint32_t * rand_pos,
do
{
// Generate 128bit of random numbers
- GUARD(aes_ctr_prf((uint8_t *)rand_pos, prf_state, sizeof(*rand_pos)));
+ POSIX_GUARD(aes_ctr_prf((uint8_t *)rand_pos, prf_state, sizeof(*rand_pos)));
// Mask only relevant bits
(*rand_pos) &= mask;
@@ -56,7 +56,7 @@ sample_uniform_r_bits_with_fixed_prf_context(OUT r_t *r,
IN const must_be_odd_t must_be_odd)
{
// Generate random data
- GUARD(aes_ctr_prf(r->raw, prf_state, R_SIZE));
+ POSIX_GUARD(aes_ctr_prf(r->raw, prf_state, R_SIZE));
// Mask upper bits of the MSByte
r->raw[R_SIZE - 1] &= MASK(R_BITS + 8 - (R_SIZE * 8));
@@ -104,7 +104,7 @@ generate_sparse_rep(OUT uint64_t * a,
// Generate weight rand numbers
do
{
- GUARD(get_rand_mod_len(&wlist[ctr], len, prf_state));
+ POSIX_GUARD(get_rand_mod_len(&wlist[ctr], len, prf_state));
ctr += is_new(wlist, ctr);
} while(ctr < weight);
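
get_rand_mod_len above masks fresh PRF output down to the bits that can represent values below len; samplers of this shape redraw whenever the masked value is still out of range, which keeps the result uniform, and generate_sparse_rep then collects `weight` distinct indices via is_new. A sketch of the mask-and-reject step with a caller-supplied 32-bit random source (len must be non-zero):

    #include <stdint.h>

    /* Smallest mask of the form 2^k - 1 that covers all values below len.
     * The mask may be one bit wider than strictly necessary; the rejection
     * loop below keeps the distribution uniform either way. */
    static uint32_t sketch_mask_for(uint32_t len)
    {
        uint32_t mask = 1;
        while (mask < len) {
            mask = (mask << 1) | 1;
        }
        return mask;
    }

    /* Draw a uniform value in [0, len) by masking and rejecting out-of-range draws. */
    static uint32_t sketch_rand_mod_len(uint32_t len, uint32_t (*rand32)(void))
    {
        const uint32_t mask = sketch_mask_for(len);
        uint32_t v;
        do {
            v = rand32() & mask;
        } while (v >= len);
        return v;
    }
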
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.h
index 1ffd56f34a..4ec60683de 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r1/sampling.h
@@ -53,9 +53,9 @@ sample_uniform_r_bits(OUT r_t *r,
// For the seedexpander
DEFER_CLEANUP(aes_ctr_prf_state_t prf_state = {0}, aes_ctr_prf_state_cleanup);
- GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, seed));
+ POSIX_GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, seed));
- GUARD(sample_uniform_r_bits_with_fixed_prf_context(r, &prf_state, must_be_odd));
+ POSIX_GUARD(sample_uniform_r_bits_with_fixed_prf_context(r, &prf_state, must_be_odd));
return SUCCESS;
}
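
DEFER_CLEANUP, used for prf_state above and throughout these KEM files, ties a cleanup function to a local variable so the cleanup runs on every exit path. A minimal sketch of that idea using the GCC/Clang cleanup attribute (hypothetical names, not s2n's exact macro):

    #include <stdio.h>
    #include <stdlib.h>

    #define SKETCH_DEFER_CLEANUP(decl, cleanup_fn) \
        __attribute__((cleanup(cleanup_fn))) decl

    /* The cleanup function receives a pointer to the variable going out of scope. */
    static void sketch_free_buffer(char **p)
    {
        free(*p);
        *p = NULL;
    }

    static int sketch_use_buffer(void)
    {
        SKETCH_DEFER_CLEANUP(char *buf = malloc(64), sketch_free_buffer);
        if (buf == NULL) {
            return -1; /* cleanup still runs; free(NULL) is a no-op */
        }
        snprintf(buf, 64, "scratch");
        printf("%s\n", buf);
        return 0;      /* buf is freed automatically on every return path */
    }
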
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c
index 26c99bc80d..2f211010df 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c
@@ -27,7 +27,7 @@ init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
bike_static_assert(sizeof(*seed) == sizeof(key.raw), seed_size_equals_ky_size);
memcpy(key.raw, seed->raw, sizeof(key.raw));
- GUARD(aes256_key_expansion(&s->ks_ptr, &key));
+ POSIX_GUARD(aes256_key_expansion(&s->ks_ptr, &key));
// Initialize buffer and counter
s->ctr.u.qw[0] = 0;
@@ -59,7 +59,7 @@ perform_aes(OUT uint8_t *ct, IN OUT aes_ctr_prf_state_t *s)
BIKE_ERROR(E_AES_OVER_USED);
}
- GUARD(aes256_enc(ct, s->ctr.u.bytes, &s->ks_ptr));
+ POSIX_GUARD(aes256_enc(ct, s->ctr.u.bytes, &s->ks_ptr));
s->ctr.u.qw[0]++;
s->rem_invokations--;
@@ -91,11 +91,11 @@ aes_ctr_prf(OUT uint8_t *a, IN OUT aes_ctr_prf_state_t *s, IN const uint32_t len
// Copy full AES blocks
while((len - idx) >= AES256_BLOCK_SIZE)
{
- GUARD(perform_aes(&a[idx], s));
+ POSIX_GUARD(perform_aes(&a[idx], s));
idx += AES256_BLOCK_SIZE;
}
- GUARD(perform_aes(s->buffer.u.bytes, s));
+ POSIX_GUARD(perform_aes(s->buffer.u.bytes, s));
// Copy the tail
s->pos = len - idx;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c
index 8f29f3add9..e7797848a0 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c
@@ -61,12 +61,12 @@ calc_pk(OUT pk_t *pk, IN const seed_t *g_seed, IN const pad_sk_t p_sk)
// Intialized padding to zero
DEFER_CLEANUP(padded_r_t g = {0}, padded_r_cleanup);
- GUARD(sample_uniform_r_bits(&g.val, g_seed, MUST_BE_ODD));
+ POSIX_GUARD(sample_uniform_r_bits(&g.val, g_seed, MUST_BE_ODD));
// Calculate (g0, g1) = (g*h1, g*h0)
- GUARD(gf2x_mod_mul((uint64_t *)&p_pk[0], (const uint64_t *)&g,
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&p_pk[0], (const uint64_t *)&g,
(const uint64_t *)&p_sk[1]));
- GUARD(gf2x_mod_mul((uint64_t *)&p_pk[1], (const uint64_t *)&g,
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&p_pk[1], (const uint64_t *)&g,
(const uint64_t *)&p_sk[0]));
// Copy the data to the output parameters.
@@ -102,12 +102,12 @@ function_h(OUT split_e_t *splitted_e, IN const r_t *in0, IN const r_t *in1)
// Use the seed to generate a sparse error vector e:
DMSG(" Generating random error.\n");
- GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, &seed_for_hash));
+ POSIX_GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, &seed_for_hash));
DEFER_CLEANUP(padded_e_t e, padded_e_cleanup);
DEFER_CLEANUP(ALIGN(8) compressed_idx_t_t dummy, compressed_idx_t_cleanup);
- GUARD(generate_sparse_rep((uint64_t *)&e, dummy.val, T1, N_BITS, sizeof(e),
+ POSIX_GUARD(generate_sparse_rep((uint64_t *)&e, dummy.val, T1, N_BITS, sizeof(e),
&prf_state));
split_e(splitted_e, &e.val);
@@ -120,7 +120,7 @@ encrypt(OUT ct_t *ct, OUT split_e_t *mf, IN const pk_t *pk, IN const seed_t *see
DEFER_CLEANUP(padded_r_t m = {0}, padded_r_cleanup);
DMSG(" Sampling m.\n");
- GUARD(sample_uniform_r_bits(&m.val, seed, NO_RESTRICTION));
+ POSIX_GUARD(sample_uniform_r_bits(&m.val, seed, NO_RESTRICTION));
// Pad the public key
pad_pk_t p_pk = {0};
@@ -135,20 +135,20 @@ encrypt(OUT ct_t *ct, OUT split_e_t *mf, IN const pk_t *pk, IN const seed_t *see
DEFER_CLEANUP(dbl_pad_ct_t mf_int = {0}, dbl_pad_ct_cleanup);
DMSG(" Computing m*f0 and m*f1.\n");
- GUARD(
+ POSIX_GUARD(
gf2x_mod_mul((uint64_t *)&mf_int[0], (uint64_t *)&m, (uint64_t *)&p_pk[0]));
- GUARD(
+ POSIX_GUARD(
gf2x_mod_mul((uint64_t *)&mf_int[1], (uint64_t *)&m, (uint64_t *)&p_pk[1]));
DEFER_CLEANUP(split_e_t splitted_e, split_e_cleanup);
DMSG(" Computing the hash function e <- H(m*f0, m*f1).\n");
- GUARD(function_h(&splitted_e, &mf_int[0].val, &mf_int[1].val));
+ POSIX_GUARD(function_h(&splitted_e, &mf_int[0].val, &mf_int[1].val));
DMSG(" Addding Error to the ciphertext.\n");
- GUARD(gf2x_add(p_ct[0].val.raw, mf_int[0].val.raw, splitted_e.val[0].raw,
+ POSIX_GUARD(gf2x_add(p_ct[0].val.raw, mf_int[0].val.raw, splitted_e.val[0].raw,
R_SIZE));
- GUARD(gf2x_add(p_ct[1].val.raw, mf_int[1].val.raw, splitted_e.val[1].raw,
+ POSIX_GUARD(gf2x_add(p_ct[1].val.raw, mf_int[1].val.raw, splitted_e.val[1].raw,
R_SIZE));
// Copy the data to the output parameters.
@@ -174,11 +174,11 @@ reencrypt(OUT pad_ct_t ce,
IN const ct_t *l_ct)
{
// Compute (c0 + e0') and (c1 + e1')
- GUARD(gf2x_add(ce[0].val.raw, l_ct->val[0].raw, e->val[0].raw, R_SIZE));
- GUARD(gf2x_add(ce[1].val.raw, l_ct->val[1].raw, e->val[1].raw, R_SIZE));
+ POSIX_GUARD(gf2x_add(ce[0].val.raw, l_ct->val[0].raw, e->val[0].raw, R_SIZE));
+ POSIX_GUARD(gf2x_add(ce[1].val.raw, l_ct->val[1].raw, e->val[1].raw, R_SIZE));
// (e0'', e1'') <-- H(c0 + e0', c1 + e1')
- GUARD(function_h(e2, &ce[0].val, &ce[1].val));
+ POSIX_GUARD(function_h(e2, &ce[0].val, &ce[1].val));
return SUCCESS;
}
@@ -212,10 +212,10 @@ get_ss(OUT ss_t *out, IN const r_t *in0, IN const r_t *in1, IN const ct_t *ct)
int
BIKE1_L1_R2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
{
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
- notnull_check(sk);
- notnull_check(pk);
+ POSIX_ENSURE_REF(sk);
+ POSIX_ENSURE_REF(pk);
// Convert to this implementation types
pk_t *l_pk = (pk_t *)pk;
@@ -232,27 +232,27 @@ BIKE1_L1_R2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
DEFER_CLEANUP(pad_sk_t p_sk = {0}, pad_sk_cleanup);
// Get the entropy seeds.
- GUARD(get_seeds(&seeds));
+ POSIX_GUARD(get_seeds(&seeds));
DMSG(" Enter crypto_kem_keypair.\n");
DMSG(" Calculating the secret key.\n");
// h0 and h1 use the same context
- GUARD(init_aes_ctr_prf_state(&h_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
+ POSIX_GUARD(init_aes_ctr_prf_state(&h_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
// sigma0/1/2 use the same context.
- GUARD(init_aes_ctr_prf_state(&s_prf_state, MAX_AES_INVOKATION, &seeds.seed[2]));
+ POSIX_GUARD(init_aes_ctr_prf_state(&s_prf_state, MAX_AES_INVOKATION, &seeds.seed[2]));
- GUARD(generate_sparse_rep((uint64_t *)&p_sk[0], l_sk.wlist[0].val, DV, R_BITS,
+ POSIX_GUARD(generate_sparse_rep((uint64_t *)&p_sk[0], l_sk.wlist[0].val, DV, R_BITS,
sizeof(p_sk[0]), &h_prf_state));
// Sample the sigmas
- GUARD(sample_uniform_r_bits_with_fixed_prf_context(&l_sk.sigma0, &s_prf_state,
+ POSIX_GUARD(sample_uniform_r_bits_with_fixed_prf_context(&l_sk.sigma0, &s_prf_state,
NO_RESTRICTION));
- GUARD(sample_uniform_r_bits_with_fixed_prf_context(&l_sk.sigma1, &s_prf_state,
+ POSIX_GUARD(sample_uniform_r_bits_with_fixed_prf_context(&l_sk.sigma1, &s_prf_state,
NO_RESTRICTION));
- GUARD(generate_sparse_rep((uint64_t *)&p_sk[1], l_sk.wlist[1].val, DV, R_BITS,
+ POSIX_GUARD(generate_sparse_rep((uint64_t *)&p_sk[1], l_sk.wlist[1].val, DV, R_BITS,
sizeof(p_sk[1]), &h_prf_state));
// Copy data
@@ -261,7 +261,7 @@ BIKE1_L1_R2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
DMSG(" Calculating the public key.\n");
- GUARD(calc_pk(l_pk, &seeds.seed[1], p_sk));
+ POSIX_GUARD(calc_pk(l_pk, &seeds.seed[1], p_sk));
memcpy(sk, &l_sk, sizeof(l_sk));
@@ -286,29 +286,29 @@ BIKE1_L1_R2_crypto_kem_enc(OUT unsigned char * ct,
IN const unsigned char *pk)
{
DMSG(" Enter crypto_kem_enc.\n");
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
// Convert to the types that are used by this implementation
const pk_t *l_pk = (const pk_t *)pk;
ct_t * l_ct = (ct_t *)ct;
ss_t * l_ss = (ss_t *)ss;
- notnull_check(pk);
- notnull_check(ct);
- notnull_check(ss);
+ POSIX_ENSURE_REF(pk);
+ POSIX_ENSURE_REF(ct);
+ POSIX_ENSURE_REF(ss);
// For NIST DRBG_CTR
DEFER_CLEANUP(seeds_t seeds = {0}, seeds_cleanup);
// Get the entropy seeds.
- GUARD(get_seeds(&seeds));
+ POSIX_GUARD(get_seeds(&seeds));
DMSG(" Encrypting.\n");
// In fact, seed[0] should be used.
// Here, we stay consistent with BIKE's reference code
// that chooses the seconde seed.
DEFER_CLEANUP(split_e_t mf, split_e_cleanup);
- GUARD(encrypt(l_ct, &mf, l_pk, &seeds.seed[1]));
+ POSIX_GUARD(encrypt(l_ct, &mf, l_pk, &seeds.seed[1]));
DMSG(" Generating shared secret.\n");
get_ss(l_ss, &mf.val[0], &mf.val[1], l_ct);
@@ -327,14 +327,14 @@ BIKE1_L1_R2_crypto_kem_dec(OUT unsigned char * ss,
IN const unsigned char *sk)
{
DMSG(" Enter crypto_kem_dec.\n");
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
// Convert to the types used by this implementation
const ct_t *l_ct = (const ct_t *)ct;
ss_t * l_ss = (ss_t *)ss;
- notnull_check(sk);
- notnull_check(ct);
- notnull_check(ss);
+ POSIX_ENSURE_REF(sk);
+ POSIX_ENSURE_REF(ct);
+ POSIX_ENSURE_REF(ss);
DEFER_CLEANUP(ALIGN(8) sk_t l_sk, sk_cleanup);
memcpy(&l_sk, sk, sizeof(l_sk));
@@ -344,14 +344,14 @@ BIKE1_L1_R2_crypto_kem_dec(OUT unsigned char * ss,
DEFER_CLEANUP(split_e_t e, split_e_cleanup);
DMSG(" Computing s.\n");
- GUARD(compute_syndrome(&syndrome, l_ct, &l_sk));
+ POSIX_GUARD(compute_syndrome(&syndrome, l_ct, &l_sk));
DMSG(" Decoding.\n");
uint32_t dec_ret = decode(&e, &syndrome, l_ct, &l_sk) != SUCCESS ? 0 : 1;
DEFER_CLEANUP(split_e_t e2, split_e_cleanup);
DEFER_CLEANUP(pad_ct_t ce, pad_ct_cleanup);
- GUARD(reencrypt(ce, &e2, &e, l_ct));
+ POSIX_GUARD(reencrypt(ce, &e2, &e, l_ct));
// Check if the decoding is successful.
// Check if the error weight equals T1.
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c
index 404c6377da..b455cd7e82 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c
@@ -96,12 +96,12 @@ compute_syndrome(OUT syndrome_t *syndrome, IN const ct_t *ct, IN const sk_t *sk)
pad_ct[1].val = ct->val[1];
// Compute s = c0*h0 + c1*h1:
- GUARD(gf2x_mod_mul((uint64_t *)&pad_s[0], (uint64_t *)&pad_ct[0],
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&pad_s[0], (uint64_t *)&pad_ct[0],
(uint64_t *)&pad_sk[0]));
- GUARD(gf2x_mod_mul((uint64_t *)&pad_s[1], (uint64_t *)&pad_ct[1],
+ POSIX_GUARD(gf2x_mod_mul((uint64_t *)&pad_s[1], (uint64_t *)&pad_ct[1],
(uint64_t *)&pad_sk[1]));
- GUARD(gf2x_add(pad_s[0].val.raw, pad_s[0].val.raw, pad_s[1].val.raw, R_SIZE));
+ POSIX_GUARD(gf2x_add(pad_s[0].val.raw, pad_s[0].val.raw, pad_s[1].val.raw, R_SIZE));
memcpy((uint8_t *)syndrome->qw, pad_s[0].val.raw, R_SIZE);
dup(syndrome);
@@ -118,13 +118,13 @@ recompute_syndrome(OUT syndrome_t *syndrome,
ct_t tmp_ct = *ct;
// Adapt the ciphertext
- GUARD(gf2x_add(tmp_ct.val[0].raw, tmp_ct.val[0].raw, splitted_e->val[0].raw,
+ POSIX_GUARD(gf2x_add(tmp_ct.val[0].raw, tmp_ct.val[0].raw, splitted_e->val[0].raw,
R_SIZE));
- GUARD(gf2x_add(tmp_ct.val[1].raw, tmp_ct.val[1].raw, splitted_e->val[1].raw,
+ POSIX_GUARD(gf2x_add(tmp_ct.val[1].raw, tmp_ct.val[1].raw, splitted_e->val[1].raw,
R_SIZE));
// Recompute the syndrome
- GUARD(compute_syndrome(syndrome, &tmp_ct, sk));
+ POSIX_GUARD(compute_syndrome(syndrome, &tmp_ct, sk));
return SUCCESS;
}
@@ -334,7 +334,7 @@ decode(OUT split_e_t *e,
DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
find_err1(e, &black_e, &gray_e, &s, sk->wlist, threshold);
- GUARD(recompute_syndrome(&s, ct, sk, e));
+ POSIX_GUARD(recompute_syndrome(&s, ct, sk, e));
#ifdef BGF_DECODER
if(iter >= 1)
{
@@ -346,14 +346,14 @@ decode(OUT split_e_t *e,
DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
find_err2(e, &black_e, &s, sk->wlist, ((DV + 1) / 2) + 1);
- GUARD(recompute_syndrome(&s, ct, sk, e));
+ POSIX_GUARD(recompute_syndrome(&s, ct, sk, e));
DMSG(" Weight of e: %lu\n",
r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
find_err2(e, &gray_e, &s, sk->wlist, ((DV + 1) / 2) + 1);
- GUARD(recompute_syndrome(&s, ct, sk, e));
+ POSIX_GUARD(recompute_syndrome(&s, ct, sk, e));
}
if(r_bits_vector_weight((r_t *)s.qw) > 0)
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c
index 09e0af3fde..c80d3365cb 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c
@@ -108,15 +108,15 @@ ossl_add(OUT uint8_t res_bin[R_SIZE],
BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
}
- GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
- GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
+ POSIX_GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
+ POSIX_GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
if(BN_GF2m_add(r, a, b) == 0)
{
BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
}
- GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
+ POSIX_GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
return SUCCESS;
}
@@ -176,10 +176,10 @@ cyclic_product(OUT uint8_t res_bin[R_SIZE],
BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
}
- GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
- GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
- GUARD(ossl_cyclic_product(r, a, b, bn_ctx));
- GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
+ POSIX_GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
+ POSIX_GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
+ POSIX_GUARD(ossl_cyclic_product(r, a, b, bn_ctx));
+ POSIX_GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
return SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c
index 3686338fad..d08fa5eea7 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c
@@ -20,7 +20,7 @@ get_rand_mod_len(OUT uint32_t * rand_pos,
do
{
// Generate 128bit of random numbers
- GUARD(aes_ctr_prf((uint8_t *)rand_pos, prf_state, sizeof(*rand_pos)));
+ POSIX_GUARD(aes_ctr_prf((uint8_t *)rand_pos, prf_state, sizeof(*rand_pos)));
// Mask only relevant bits
(*rand_pos) &= mask;
@@ -56,7 +56,7 @@ sample_uniform_r_bits_with_fixed_prf_context(OUT r_t *r,
IN const must_be_odd_t must_be_odd)
{
// Generate random data
- GUARD(aes_ctr_prf(r->raw, prf_state, R_SIZE));
+ POSIX_GUARD(aes_ctr_prf(r->raw, prf_state, R_SIZE));
// Mask upper bits of the MSByte
r->raw[R_SIZE - 1] &= MASK(R_BITS + 8 - (R_SIZE * 8));
@@ -104,7 +104,7 @@ generate_sparse_rep(OUT uint64_t * a,
// Generate weight rand numbers
do
{
- GUARD(get_rand_mod_len(&wlist[ctr], len, prf_state));
+ POSIX_GUARD(get_rand_mod_len(&wlist[ctr], len, prf_state));
ctr += is_new(wlist, ctr);
} while(ctr < weight);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h
index 1ffd56f34a..4ec60683de 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h
@@ -53,9 +53,9 @@ sample_uniform_r_bits(OUT r_t *r,
// For the seedexpander
DEFER_CLEANUP(aes_ctr_prf_state_t prf_state = {0}, aes_ctr_prf_state_cleanup);
- GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, seed));
+ POSIX_GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, seed));
- GUARD(sample_uniform_r_bits_with_fixed_prf_context(r, &prf_state, must_be_odd));
+ POSIX_GUARD(sample_uniform_r_bits_with_fixed_prf_context(r, &prf_state, must_be_odd));
return SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/LICENSE b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/LICENSE
new file mode 100644
index 0000000000..7a4a3ea242
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes.h
new file mode 100644
index 0000000000..b8b04c3655
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes.h
@@ -0,0 +1,62 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include <openssl/evp.h>
+
+#include "cleanup.h"
+
+#define MAX_AES_INVOKATION (MASK(32))
+
+#define AES256_KEY_BYTES (32U)
+#define AES256_KEY_BITS (AES256_KEY_BYTES * 8)
+#define AES256_BLOCK_BYTES (16U)
+#define AES256_ROUNDS (14U)
+
+typedef ALIGN(16) struct aes256_key_s {
+ uint8_t raw[AES256_KEY_BYTES];
+} aes256_key_t;
+
+CLEANUP_FUNC(aes256_key, aes256_key_t)
+
+// Using OpenSSL structures
+typedef EVP_CIPHER_CTX *aes256_ks_t;
+
+_INLINE_ ret_t aes256_key_expansion(OUT aes256_ks_t *ks,
+ IN const aes256_key_t *key)
+{
+ *ks = EVP_CIPHER_CTX_new();
+ if(*ks == NULL) {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+ if(0 == EVP_EncryptInit_ex(*ks, EVP_aes_256_ecb(), NULL, key->raw, NULL)) {
+ EVP_CIPHER_CTX_free(*ks);
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ EVP_CIPHER_CTX_set_padding(*ks, 0);
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t aes256_enc(OUT uint8_t *ct,
+ IN const uint8_t *pt,
+ IN const aes256_ks_t *ks)
+{
+ int outlen = 0;
+ if(0 == EVP_EncryptUpdate(*ks, ct, &outlen, pt, AES256_BLOCK_BYTES)) {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+ return SUCCESS;
+}
+
+_INLINE_ void aes256_free_ks(OUT aes256_ks_t *ks)
+{
+ EVP_CIPHER_CTX_free(*ks);
+ ks = NULL;
+}
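
The new aes.h above drives AES-256-ECB through OpenSSL's EVP layer: allocate a context, initialize it with EVP_aes_256_ecb and the raw key, disable padding, then encrypt one 16-byte block per EVP_EncryptUpdate call. A standalone usage sketch of that same sequence, with error handling reduced to early returns:

    #include <openssl/evp.h>
    #include <stdint.h>

    /* Encrypt a single 16-byte block with AES-256 in ECB mode. Returns 1 on success. */
    static int sketch_aes256_encrypt_block(const uint8_t key[32], const uint8_t pt[16], uint8_t ct[16])
    {
        int ok = 0, outlen = 0;
        EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
        if (ctx == NULL) {
            return 0;
        }
        if (EVP_EncryptInit_ex(ctx, EVP_aes_256_ecb(), NULL, key, NULL) != 1) {
            goto cleanup;
        }
        EVP_CIPHER_CTX_set_padding(ctx, 0); /* the caller supplies whole blocks */
        if (EVP_EncryptUpdate(ctx, ct, &outlen, pt, 16) != 1 || outlen != 16) {
            goto cleanup;
        }
        ok = 1;
    cleanup:
        EVP_CIPHER_CTX_free(ctx);
        return ok;
    }
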
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.c
new file mode 100644
index 0000000000..9b50469ef1
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.c
@@ -0,0 +1,97 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include "aes_ctr_prf.h"
+#include "utilities.h"
+
+ret_t init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
+ IN const uint32_t max_invokations,
+ IN const seed_t *seed)
+{
+ if(0 == max_invokations) {
+ BIKE_ERROR(E_AES_CTR_PRF_INIT_FAIL);
+ }
+
+ // Set the key schedule (from seed).
+ // Make sure the size matches the AES256 key size.
+ DEFER_CLEANUP(aes256_key_t key, aes256_key_cleanup);
+
+ bike_static_assert(sizeof(*seed) == sizeof(key.raw), seed_size_equals_ky_size);
+ bike_memcpy(key.raw, seed->raw, sizeof(key.raw));
+
+ POSIX_GUARD(aes256_key_expansion(&s->ks, &key));
+
+ // Initialize buffer and counter
+ s->ctr.u.qw[0] = 0;
+ s->ctr.u.qw[1] = 0;
+ s->buffer.u.qw[0] = 0;
+ s->buffer.u.qw[1] = 0;
+
+ s->pos = AES256_BLOCK_BYTES;
+ s->rem_invokations = max_invokations;
+
+ DMSG(" Init aes_prf_ctr state:\n");
+ DMSG(" s.pos = %d\n", s->pos);
+ DMSG(" s.rem_invokations = %u\n", s->rem_invokations);
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t perform_aes(OUT uint8_t *ct, IN OUT aes_ctr_prf_state_t *s)
+{
+ // Ensure that the CTR is large enough
+ bike_static_assert(
+ ((sizeof(s->ctr.u.qw[0]) == 8) && (BIT(33) >= MAX_AES_INVOKATION)),
+ ctr_size_is_too_small);
+
+ if(0 == s->rem_invokations) {
+ BIKE_ERROR(E_AES_OVER_USED);
+ }
+
+ POSIX_GUARD(aes256_enc(ct, s->ctr.u.bytes, &s->ks));
+
+ s->ctr.u.qw[0]++;
+ s->rem_invokations--;
+
+ return SUCCESS;
+}
+
+ret_t aes_ctr_prf(OUT uint8_t *a,
+ IN OUT aes_ctr_prf_state_t *s,
+ IN const uint32_t len)
+{
+  // When len is smaller than what remains in the buffer,
+  // there is no need for additional AES invocations.
+ if((len + s->pos) <= AES256_BLOCK_BYTES) {
+ bike_memcpy(a, &s->buffer.u.bytes[s->pos], len);
+ s->pos += len;
+
+ return SUCCESS;
+ }
+
+ // If s.pos != AES256_BLOCK_BYTES then copy what's left in the buffer.
+ // Else copy zero bytes
+ uint32_t idx = AES256_BLOCK_BYTES - s->pos;
+ bike_memcpy(a, &s->buffer.u.bytes[s->pos], idx);
+
+ // Init s.pos
+ s->pos = 0;
+
+ // Copy full AES blocks
+ while((len - idx) >= AES256_BLOCK_BYTES) {
+ POSIX_GUARD(perform_aes(&a[idx], s));
+ idx += AES256_BLOCK_BYTES;
+ }
+
+ POSIX_GUARD(perform_aes(s->buffer.u.bytes, s));
+
+ // Copy the tail
+ s->pos = len - idx;
+ bike_memcpy(&a[idx], s->buffer.u.bytes, s->pos);
+
+ return SUCCESS;
+}
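
aes_ctr_prf above is counter-mode AES used as a PRF with a one-block leftover buffer: short requests are served from the buffer, whole blocks are generated straight into the output while the counter increments, and the tail refills the buffer for the next call (the rem_invokations budget is enforced in perform_aes). A cut-down sketch of just the buffering logic, with the block cipher abstracted behind a callback and the budget check omitted:

    #include <stdint.h>
    #include <string.h>

    #define SKETCH_BLOCK_BYTES 16

    struct sketch_ctr_prf {
        uint64_t ctr;
        uint8_t  buffer[SKETCH_BLOCK_BYTES];
        uint32_t pos; /* unread bytes start at buffer[pos] */
        void (*block)(uint8_t out[SKETCH_BLOCK_BYTES], uint64_t ctr); /* e.g. AES-256(key, ctr) */
    };

    static void sketch_prf_next_block(struct sketch_ctr_prf *s, uint8_t out[SKETCH_BLOCK_BYTES])
    {
        s->block(out, s->ctr);
        s->ctr++;
    }

    static void sketch_ctr_prf(uint8_t *a, struct sketch_ctr_prf *s, uint32_t len)
    {
        /* Serve short requests straight from the buffered leftovers. */
        if (len + s->pos <= SKETCH_BLOCK_BYTES) {
            memcpy(a, &s->buffer[s->pos], len);
            s->pos += len;
            return;
        }

        /* Drain what is left in the buffer, then emit whole fresh blocks. */
        uint32_t idx = SKETCH_BLOCK_BYTES - s->pos;
        memcpy(a, &s->buffer[s->pos], idx);
        s->pos = 0;

        while (len - idx >= SKETCH_BLOCK_BYTES) {
            sketch_prf_next_block(s, &a[idx]);
            idx += SKETCH_BLOCK_BYTES;
        }

        /* Refill the buffer and hand out the tail from it. */
        sketch_prf_next_block(s, s->buffer);
        s->pos = len - idx;
        memcpy(&a[idx], s->buffer, s->pos);
    }
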
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.h
new file mode 100644
index 0000000000..684a52a6fc
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/aes_ctr_prf.h
@@ -0,0 +1,43 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "aes.h"
+
+//////////////////////////////
+// Types
+/////////////////////////////
+
+typedef struct aes_ctr_prf_state_s {
+ uint128_t ctr;
+ uint128_t buffer;
+ aes256_ks_t ks;
+ uint32_t rem_invokations;
+ uint8_t pos;
+} aes_ctr_prf_state_t;
+
+//////////////////////////////
+// Methods
+/////////////////////////////
+
+ret_t init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
+ IN uint32_t max_invokations,
+ IN const seed_t *seed);
+
+ret_t aes_ctr_prf(OUT uint8_t *a, IN OUT aes_ctr_prf_state_t *s, IN uint32_t len);
+
+_INLINE_ void finalize_aes_ctr_prf(IN OUT aes_ctr_prf_state_t *s)
+{
+ aes256_free_ks(&s->ks);
+ secure_clean((uint8_t *)s, sizeof(*s));
+}
+
+_INLINE_ void aes_ctr_prf_state_cleanup(IN OUT aes_ctr_prf_state_t *s)
+{
+ finalize_aes_ctr_prf(s);
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_defs.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_defs.h
new file mode 100644
index 0000000000..697efd0627
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_defs.h
@@ -0,0 +1,91 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "defs.h"
+
+////////////////////////////////////////////
+// BIKE Parameters
+///////////////////////////////////////////
+#define N0 2
+
+#if !defined(LEVEL)
+# define LEVEL 1
+#endif
+
+#if(LEVEL == 3)
+# define R_BITS 24659
+# define DV 103
+# define T1 199
+
+# define THRESHOLD_COEFF0 15.2588
+# define THRESHOLD_COEFF1 0.005265
+# define THRESHOLD_MIN 52
+
+// The gf2m code is optimized to a block in this case:
+# define BLOCK_BITS 32768
+#elif(LEVEL == 1)
+// 64-bits of post-quantum security parameters (BIKE paper):
+# define R_BITS 12323
+# define DV 71
+# define T1 134
+
+# define THRESHOLD_COEFF0 13.530
+# define THRESHOLD_COEFF1 0.0069722
+# define THRESHOLD_MIN 36
+
+// The gf2x code is optimized to a block in this case:
+# define BLOCK_BITS (16384)
+#else
+# error "Bad level, choose one of 1/3/5"
+#endif
+
+#define NUM_OF_SEEDS 2
+
+// Round the size to the nearest byte.
+// SIZE suffix, is the number of bytes (uint8_t).
+#define N_BITS (R_BITS * N0)
+#define R_BYTES DIVIDE_AND_CEIL(R_BITS, 8)
+#define R_QWORDS DIVIDE_AND_CEIL(R_BITS, 8 * BYTES_IN_QWORD)
+#define R_XMM DIVIDE_AND_CEIL(R_BITS, 8 * BYTES_IN_XMM)
+#define R_YMM DIVIDE_AND_CEIL(R_BITS, 8 * BYTES_IN_YMM)
+#define R_ZMM DIVIDE_AND_CEIL(R_BITS, 8 * BYTES_IN_ZMM)
+
+#define R_BLOCKS DIVIDE_AND_CEIL(R_BITS, BLOCK_BITS)
+#define R_PADDED (R_BLOCKS * BLOCK_BITS)
+#define R_PADDED_BYTES (R_PADDED / 8)
+#define R_PADDED_QWORDS (R_PADDED / 64)
+
+#define LAST_R_QWORD_LEAD (R_BITS & MASK(6))
+#define LAST_R_QWORD_TRAIL (64 - LAST_R_QWORD_LEAD)
+#define LAST_R_QWORD_MASK MASK(LAST_R_QWORD_LEAD)
+
+#define LAST_R_BYTE_LEAD (R_BITS & MASK(3))
+#define LAST_R_BYTE_TRAIL (8 - LAST_R_BYTE_LEAD)
+#define LAST_R_BYTE_MASK MASK(LAST_R_BYTE_LEAD)
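+
+// For LEVEL 1 (R_BITS = 12323) these evaluate to, for example:
+// R_BYTES = 1541, LAST_R_BYTE_LEAD = 3, LAST_R_BYTE_MASK = 0x07,
+// R_QWORDS = 193, LAST_R_QWORD_LEAD = 35 (= 12323 mod 64).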
+
+// Data alignment
+#define ALIGN_BYTES (BYTES_IN_ZMM)
+
+#define M_BITS 256
+#define M_BYTES (M_BITS / 8)
+
+#define SS_BITS 256
+#define SS_BYTES (SS_BITS / 8)
+
+#define SEED_BYTES (256 / 8)
+
+//////////////////////////////////
+// Parameters for the BGF decoder.
+//////////////////////////////////
+#define BGF_DECODER
+#define DELTA 3
+#define SLICES (LOG2_MSB(DV) + 1)
+
+// GF2X inversion can only handle R < 32768
+bike_static_assert((R_BITS < 32768), r_too_large_for_inversion);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_r3_kem.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_r3_kem.c
new file mode 100644
index 0000000000..328bb52db8
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/bike_r3_kem.c
@@ -0,0 +1,288 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron, and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include "decode.h"
+#include "gf2x.h"
+#include "sampling.h"
+#include "sha.h"
+#include "tls/s2n_kem.h"
+#include "pq-crypto/s2n_pq.h"
+
+// m_t and seed_t have the same size and thus could be considered the same
+// type. However, for security reasons we distinguish these types, even at
+// the cost of a small amount of extra complexity.
+_INLINE_ void convert_seed_to_m_type(OUT m_t *m, IN const seed_t *seed)
+{
+ bike_static_assert(sizeof(*m) == sizeof(*seed), m_size_eq_seed_size);
+ bike_memcpy(m->raw, seed->raw, sizeof(*m));
+}
+
+_INLINE_ void convert_m_to_seed_type(OUT seed_t *seed, IN const m_t *m)
+{
+ bike_static_assert(sizeof(*m) == sizeof(*seed), m_size_eq_seed_size);
+ bike_memcpy(seed->raw, m->raw, sizeof(*seed));
+}
+
+// (e0, e1) = H(m)
+_INLINE_ ret_t function_h(OUT pad_e_t *e, IN const m_t *m)
+{
+ DEFER_CLEANUP(seed_t seed = {0}, seed_cleanup);
+
+ convert_m_to_seed_type(&seed, m);
+ return generate_error_vector(e, &seed);
+}
+
+// out = L(e)
+_INLINE_ ret_t function_l(OUT m_t *out, IN const pad_e_t *e)
+{
+ DEFER_CLEANUP(sha_dgst_t dgst = {0}, sha_dgst_cleanup);
+ DEFER_CLEANUP(e_t tmp, e_cleanup);
+
+ // Take the padding away
+ tmp.val[0] = e->val[0].val;
+ tmp.val[1] = e->val[1].val;
+
+ POSIX_GUARD(sha(&dgst, sizeof(tmp), (uint8_t *)&tmp));
+
+ // Truncate the SHA384 digest to a 256-bits m_t
+ bike_static_assert(sizeof(dgst) >= sizeof(*out), dgst_size_lt_m_size);
+ bike_memcpy(out->raw, dgst.u.raw, sizeof(*out));
+
+ return SUCCESS;
+}
+
+// Generate the Shared Secret K(m, c0, c1)
+_INLINE_ ret_t function_k(OUT ss_t *out, IN const m_t *m, IN const ct_t *ct)
+{
+ DEFER_CLEANUP(func_k_t tmp, func_k_cleanup);
+ DEFER_CLEANUP(sha_dgst_t dgst = {0}, sha_dgst_cleanup);
+
+ // Copy every element, padded to the nearest byte
+ tmp.m = *m;
+ tmp.c0 = ct->c0;
+ tmp.c1 = ct->c1;
+
+ POSIX_GUARD(sha(&dgst, sizeof(tmp), (uint8_t *)&tmp));
+
+ // Truncate the SHA384 digest to a 256-bits value
+ // to subsequently use it as a seed.
+ bike_static_assert(sizeof(dgst) >= sizeof(*out), dgst_size_lt_out_size);
+ bike_memcpy(out->raw, dgst.u.raw, sizeof(*out));
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t encrypt(OUT ct_t *ct,
+ IN const pad_e_t *e,
+ IN const pk_t *pk,
+ IN const m_t *m)
+{
+ // Pad the public key and the ciphertext
+ pad_r_t p_ct = {0};
+ pad_r_t p_pk = {0};
+ p_pk.val = *pk;
+
+ // Generate the ciphertext
+ // ct = pk * e1 + e0
+ gf2x_mod_mul(&p_ct, &e->val[1], &p_pk);
+ gf2x_mod_add(&p_ct, &p_ct, &e->val[0]);
+
+ ct->c0 = p_ct.val;
+
+ // c1 = L(e0, e1)
+ POSIX_GUARD(function_l(&ct->c1, e));
+
+ // m xor L(e0, e1)
+ for(size_t i = 0; i < sizeof(*m); i++) {
+ ct->c1.raw[i] ^= m->raw[i];
+ }
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t reencrypt(OUT m_t *m, IN const pad_e_t *e, IN const ct_t *l_ct)
+{
+ DEFER_CLEANUP(m_t tmp, m_cleanup);
+
+ POSIX_GUARD(function_l(&tmp, e));
+
+ // m' = c1 ^ L(e')
+ for(size_t i = 0; i < sizeof(*m); i++) {
+ m->raw[i] = tmp.raw[i] ^ l_ct->c1.raw[i];
+ }
+
+ return SUCCESS;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// The three APIs below (keypair, encapsulate, decapsulate) are defined by NIST:
+////////////////////////////////////////////////////////////////////////////////
+int BIKE_L1_R3_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE_REF(sk);
+ POSIX_ENSURE_REF(pk);
+
+ DEFER_CLEANUP(aligned_sk_t l_sk = {0}, sk_cleanup);
+
+ // The secret key is (h0, h1),
+ // and the public key h=(h0^-1 * h1).
+ // Padded structures are used internally, and are required by the
+ // decoder and the gf2x multiplication.
+ DEFER_CLEANUP(pad_r_t h0 = {0}, pad_r_cleanup);
+ DEFER_CLEANUP(pad_r_t h1 = {0}, pad_r_cleanup);
+ DEFER_CLEANUP(pad_r_t h0inv = {0}, pad_r_cleanup);
+ DEFER_CLEANUP(pad_r_t h = {0}, pad_r_cleanup);
+
+ // The randomness of the key generation
+ DEFER_CLEANUP(seeds_t seeds = {0}, seeds_cleanup);
+
+ // An AES_PRF state for the secret key
+ DEFER_CLEANUP(aes_ctr_prf_state_t h_prf_state = {0}, aes_ctr_prf_state_cleanup);
+
+ POSIX_GUARD(get_seeds(&seeds));
+ POSIX_GUARD(init_aes_ctr_prf_state(&h_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
+
+ // Generate the secret key (h0, h1) with weight w/2
+ POSIX_GUARD(generate_sparse_rep(&h0, l_sk.wlist[0].val, &h_prf_state));
+ POSIX_GUARD(generate_sparse_rep(&h1, l_sk.wlist[1].val, &h_prf_state));
+
+ // Generate sigma
+ convert_seed_to_m_type(&l_sk.sigma, &seeds.seed[1]);
+
+ // Calculate the public key
+ gf2x_mod_inv(&h0inv, &h0);
+ gf2x_mod_mul(&h, &h1, &h0inv);
+
+ // Fill the secret key data structure with the (unpadded) contents
+ l_sk.bin[0] = h0.val;
+ l_sk.bin[1] = h1.val;
+ l_sk.pk = h.val;
+
+ // Copy the data to the output buffers
+ bike_memcpy(sk, &l_sk, sizeof(l_sk));
+ bike_memcpy(pk, &l_sk.pk, sizeof(l_sk.pk));
+
+ return SUCCESS;
+}
+
+// Encapsulate - pk is the public key,
+// ct is a key encapsulation message (ciphertext),
+// ss is the shared secret.
+int BIKE_L1_R3_crypto_kem_enc(OUT unsigned char * ct,
+ OUT unsigned char * ss,
+ IN const unsigned char *pk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE_REF(pk);
+ POSIX_ENSURE_REF(ct);
+ POSIX_ENSURE_REF(ss);
+
+ // Public values (they do not require cleanup on exit).
+ pk_t l_pk;
+ ct_t l_ct;
+
+ DEFER_CLEANUP(m_t m, m_cleanup);
+ DEFER_CLEANUP(ss_t l_ss, ss_cleanup);
+ DEFER_CLEANUP(seeds_t seeds = {0}, seeds_cleanup);
+ DEFER_CLEANUP(pad_e_t e, pad_e_cleanup);
+
+ // Copy the data from the input buffer. This is required in order to avoid
+ // alignment issues on non x86_64 processors.
+ bike_memcpy(&l_pk, pk, sizeof(l_pk));
+
+ POSIX_GUARD(get_seeds(&seeds));
+
+ // e = H(m) = H(seed[0])
+ convert_seed_to_m_type(&m, &seeds.seed[0]);
+ POSIX_GUARD(function_h(&e, &m));
+
+ // Calculate the ciphertext
+ POSIX_GUARD(encrypt(&l_ct, &e, &l_pk, &m));
+
+ // Generate the shared secret
+ POSIX_GUARD(function_k(&l_ss, &m, &l_ct));
+
+ // Copy the data to the output buffers
+ bike_memcpy(ct, &l_ct, sizeof(l_ct));
+ bike_memcpy(ss, &l_ss, sizeof(l_ss));
+
+ return SUCCESS;
+}
+
+// Decapsulate - ct is a key encapsulation message (ciphertext),
+// sk is the private key,
+// ss is the shared secret
+int BIKE_L1_R3_crypto_kem_dec(OUT unsigned char * ss,
+ IN const unsigned char *ct,
+ IN const unsigned char *sk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE_REF(sk);
+ POSIX_ENSURE_REF(ct);
+ POSIX_ENSURE_REF(ss);
+
+ // Public values (they do not require cleanup on exit)
+ ct_t l_ct;
+
+ DEFER_CLEANUP(seeds_t seeds = {0}, seeds_cleanup);
+
+ DEFER_CLEANUP(ss_t l_ss, ss_cleanup);
+ DEFER_CLEANUP(aligned_sk_t l_sk, sk_cleanup);
+ DEFER_CLEANUP(e_t e, e_cleanup);
+ DEFER_CLEANUP(m_t m_prime, m_cleanup);
+ DEFER_CLEANUP(pad_e_t e_tmp, pad_e_cleanup);
+ DEFER_CLEANUP(pad_e_t e_prime, pad_e_cleanup);
+
+ // Copy the data from the input buffers. This is required in order to avoid
+ // alignment issues on non x86_64 processors.
+ bike_memcpy(&l_ct, ct, sizeof(l_ct));
+ bike_memcpy(&l_sk, sk, sizeof(l_sk));
+
+ // Generate a random error vector to be used in case of decoding failure
+ // (Note: a fixed, zeroed error vector could possibly suffice here as well,
+ // in place of this generation.)
+ POSIX_GUARD(get_seeds(&seeds));
+ POSIX_GUARD(generate_error_vector(&e_prime, &seeds.seed[0]));
+
+ // Decode and on success check if |e|=T (all in constant-time)
+ volatile uint32_t success_cond = (decode(&e, &l_ct, &l_sk) == SUCCESS);
+ success_cond &= secure_cmp32(T1, r_bits_vector_weight(&e.val[0]) +
+ r_bits_vector_weight(&e.val[1]));
+
+ // Set appropriate error based on the success condition
+ uint8_t mask = ~secure_l32_mask(0, success_cond);
+ for(size_t i = 0; i < R_BYTES; i++) {
+ PE0_RAW(&e_prime)[i] &= u8_barrier(~mask);
+ PE0_RAW(&e_prime)[i] |= (u8_barrier(mask) & E0_RAW(&e)[i]);
+ PE1_RAW(&e_prime)[i] &= u8_barrier(~mask);
+ PE1_RAW(&e_prime)[i] |= (u8_barrier(mask) & E1_RAW(&e)[i]);
+ }
+
+ POSIX_GUARD(reencrypt(&m_prime, &e_prime, &l_ct));
+
+ // Check if H(m') is equal to (e0', e1')
+ // (in constant-time)
+ POSIX_GUARD(function_h(&e_tmp, &m_prime));
+ success_cond = secure_cmp(PE0_RAW(&e_prime), PE0_RAW(&e_tmp), R_BYTES);
+ success_cond &= secure_cmp(PE1_RAW(&e_prime), PE1_RAW(&e_tmp), R_BYTES);
+
+ // Compute either K(m', C) or K(sigma, C) based on the success condition
+ mask = secure_l32_mask(0, success_cond);
+ for(size_t i = 0; i < M_BYTES; i++) {
+ m_prime.raw[i] &= u8_barrier(~mask);
+ m_prime.raw[i] |= (u8_barrier(mask) & l_sk.sigma.raw[i]);
+ }
+
+ // Generate the shared secret
+ POSIX_GUARD(function_k(&l_ss, &m_prime, &l_ct));
+
+ // Copy the data into the output buffer
+ bike_memcpy(ss, &l_ss, sizeof(l_ss));
+
+ return SUCCESS;
+}
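+
+// Illustrative round-trip sketch (an example, not part of the NIST API above;
+// it assumes caller buffers sized from the types used in this file):
+//
+//   uint8_t pk[sizeof(pk_t)], sk[sizeof(aligned_sk_t)];
+//   uint8_t ct[sizeof(ct_t)], ss1[sizeof(ss_t)], ss2[sizeof(ss_t)];
+//   POSIX_GUARD(BIKE_L1_R3_crypto_kem_keypair(pk, sk));
+//   POSIX_GUARD(BIKE_L1_R3_crypto_kem_enc(ct, ss1, pk));
+//   POSIX_GUARD(BIKE_L1_R3_crypto_kem_dec(ss2, ct, sk));
+//   // On success, ss1 and ss2 hold the same shared secret.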
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/cleanup.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/cleanup.h
new file mode 100644
index 0000000000..22e8c44250
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/cleanup.h
@@ -0,0 +1,63 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "utilities.h"
+
+/* Runs the _thecleanup function on _thealloc once _thealloc goes out of scope */
+#define DEFER_CLEANUP(_thealloc, _thecleanup) \
+ __attribute__((cleanup(_thecleanup))) _thealloc
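+// Example (as used throughout this module):
+//   DEFER_CLEANUP(pad_r_t h0 = {0}, pad_r_cleanup); // zeroized when h0 leaves scope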
+
+// len is the length of p, in bytes
+_INLINE_ void secure_clean(OUT uint8_t *p, IN const uint32_t len)
+{
+#if defined(_WIN32)
+ SecureZeroMemory(p, len);
+#else
+ typedef void *(*memset_t)(void *, int, size_t);
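+ // Calling bike_memset through a volatile function pointer keeps the compiler
+ // from optimizing the zeroization away as a dead store.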
+ static volatile memset_t memset_func = bike_memset;
+ memset_func(p, 0, len);
+#endif
+}
+
+#define CLEANUP_FUNC(name, type) \
+ _INLINE_ void name##_cleanup(IN OUT type *o) \
+ { \
+ secure_clean((uint8_t *)o, sizeof(*o)); \
+ }
+
+CLEANUP_FUNC(r, r_t)
+CLEANUP_FUNC(m, m_t)
+CLEANUP_FUNC(e, e_t)
+CLEANUP_FUNC(sk, sk_t)
+CLEANUP_FUNC(ss, ss_t)
+CLEANUP_FUNC(ct, ct_t)
+CLEANUP_FUNC(pad_r, pad_r_t)
+CLEANUP_FUNC(pad_e, pad_e_t)
+CLEANUP_FUNC(seed, seed_t)
+CLEANUP_FUNC(syndrome, syndrome_t)
+CLEANUP_FUNC(upc, upc_t)
+CLEANUP_FUNC(func_k, func_k_t)
+CLEANUP_FUNC(dbl_pad_r, dbl_pad_r_t)
+
+// The functions below require special handling because we deal
+// with arrays and not structures.
+
+_INLINE_ void compressed_idx_d_ar_cleanup(IN OUT compressed_idx_d_ar_t *o)
+{
+ for(int i = 0; i < N0; i++) {
+ secure_clean((uint8_t *)&(*o)[i], sizeof((*o)[0]));
+ }
+}
+
+_INLINE_ void seeds_cleanup(IN OUT seeds_t *o)
+{
+ for(int i = 0; i < NUM_OF_SEEDS; i++) {
+ seed_cleanup(&(o->seed[i]));
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.c
new file mode 100644
index 0000000000..c280b95f03
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.c
@@ -0,0 +1,280 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ *
+ * [1] The optimizations are based on the description developed in the paper:
+ * Drucker, Nir, and Shay Gueron. 2019. “A Toolbox for Software Optimization
+ * of QC-MDPC Code-Based Cryptosystems.” Journal of Cryptographic Engineering,
+ * January, 1–17. https://doi.org/10.1007/s13389-018-00200-4.
+ *
+ * [2] The decoder algorithm is the Black-Gray decoder in
+ * the early submission of CAKE (due to N. Sendrier and R. Misoczki).
+ *
+ * [3] The analysis for the constant time implementation is given in
+ * Drucker, Nir, Shay Gueron, and Dusan Kostic. 2019.
+ * “On Constant-Time QC-MDPC Decoding with Negligible Failure Rate.”
+ * Cryptology EPrint Archive, 2019. https://eprint.iacr.org/2019/1289.
+ *
+ * [4] it was adapted to BGF in:
+ * Drucker, Nir, Shay Gueron, and Dusan Kostic. 2019.
+ * “QC-MDPC decoders with several shades of gray.”
+ * Cryptology EPrint Archive, 2019. To be published.
+ *
+ * [5] Chou, T.: QcBits: Constant-Time Small-Key Code-Based Cryptography.
+ * In: Gier-lichs, B., Poschmann, A.Y. (eds.) Cryptographic Hardware
+ * and Embedded Systems– CHES 2016. pp. 280–300. Springer Berlin Heidelberg,
+ * Berlin, Heidelberg (2016)
+ *
+ * [6] The rotate512_small function is a derivative of the code described in:
+ * Guimarães, Antonio, Diego F Aranha, and Edson Borin. 2019.
+ * “Optimized Implementation of QC-MDPC Code-Based Cryptography.”
+ * Concurrency and Computation: Practice and Experience 31 (18):
+ * e5089. https://doi.org/10.1002/cpe.5089.
+ */
+
+#include "decode.h"
+#include "cleanup.h"
+#include "decode_internal.h"
+#include "gf2x.h"
+#include "utilities.h"
+
+// Decoding (bit-flipping) parameter
+#if defined(BG_DECODER)
+# if(LEVEL == 1)
+# define MAX_IT 3
+# elif(LEVEL == 3)
+# define MAX_IT 4
+# else
+# error "Level can only be 1/3"
+# endif
+#elif defined(BGF_DECODER)
+# if(LEVEL == 1)
+# define MAX_IT 5
+# elif(LEVEL == 3)
+# define MAX_IT 5
+# else
+# error "Level can only be 1/3"
+# endif
+#endif
+
+ret_t compute_syndrome(OUT syndrome_t *syndrome,
+ IN const pad_r_t *c0,
+ IN const pad_r_t *h0,
+ IN const decode_ctx *ctx)
+{
+ DEFER_CLEANUP(pad_r_t pad_s, pad_r_cleanup);
+
+ gf2x_mod_mul(&pad_s, c0, h0);
+
+ bike_memcpy((uint8_t *)syndrome->qw, pad_s.val.raw, R_BYTES);
+ ctx->dup(syndrome);
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t recompute_syndrome(OUT syndrome_t *syndrome,
+ IN const pad_r_t *c0,
+ IN const pad_r_t *h0,
+ IN const pad_r_t *pk,
+ IN const e_t *e,
+ IN const decode_ctx *ctx)
+{
+ DEFER_CLEANUP(pad_r_t tmp_c0, pad_r_cleanup);
+ DEFER_CLEANUP(pad_r_t e0 = {0}, pad_r_cleanup);
+ DEFER_CLEANUP(pad_r_t e1 = {0}, pad_r_cleanup);
+
+ e0.val = e->val[0];
+ e1.val = e->val[1];
+
+ // tmp_c0 = pk * e1 + c0 + e0
+ gf2x_mod_mul(&tmp_c0, &e1, pk);
+ gf2x_mod_add(&tmp_c0, &tmp_c0, c0);
+ gf2x_mod_add(&tmp_c0, &tmp_c0, &e0);
+
+ // Recompute the syndrome using the updated ciphertext
+ POSIX_GUARD(compute_syndrome(syndrome, &tmp_c0, h0, ctx));
+
+ return SUCCESS;
+}
+
+_INLINE_ uint8_t get_threshold(IN const syndrome_t *s)
+{
+ bike_static_assert(sizeof(*s) >= sizeof(r_t), syndrome_is_large_enough);
+
+ const uint32_t syndrome_weight = r_bits_vector_weight((const r_t *)s->qw);
+
+ // The equations below are defined in BIKE's specification p. 16, Section 5.2
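+ // (Worked example for LEVEL 1: a syndrome weight of 4000 gives
+ // 13.530 + 0.0069722 * 4000 = 41.4, which truncates to 41 and is kept
+ // since it exceeds THRESHOLD_MIN = 36.)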
+ uint32_t thr = THRESHOLD_COEFF0 + (THRESHOLD_COEFF1 * syndrome_weight);
+ const uint32_t mask = secure_l32_mask(thr, THRESHOLD_MIN);
+ thr = (u32_barrier(mask) & thr) | (u32_barrier(~mask) & THRESHOLD_MIN);
+
+ DMSG(" Threshold: %d\n", thr);
+ return thr;
+}
+
+// Calculate the Unsatisfied Parity Checks (UPCs) and update the errors
+// vector (e) accordingly. In addition, update the black and gray errors vector
+// with the relevant values.
+_INLINE_ void find_err1(OUT e_t *e,
+ OUT e_t *black_e,
+ OUT e_t *gray_e,
+ IN const syndrome_t * syndrome,
+ IN const compressed_idx_d_ar_t wlist,
+ IN const uint8_t threshold,
+ IN const decode_ctx *ctx)
+{
+ // This function uses the bit-slice-adder methodology of [5]:
+ DEFER_CLEANUP(syndrome_t rotated_syndrome = {0}, syndrome_cleanup);
+ DEFER_CLEANUP(upc_t upc, upc_cleanup);
+
+ for(uint32_t i = 0; i < N0; i++) {
+ // UPC must start from zero at every iteration
+ bike_memset(&upc, 0, sizeof(upc));
+
+ // 1) Right-rotate the syndrome for every secret key set bit index
+ // Then slice-add it to the UPC array.
+ for(size_t j = 0; j < DV; j++) {
+ ctx->rotate_right(&rotated_syndrome, syndrome, wlist[i].val[j]);
+ ctx->bit_sliced_adder(&upc, &rotated_syndrome, LOG2_MSB(j + 1));
+ }
+
+ // 2) Subtract the threshold from the UPC counters
+ ctx->bit_slice_full_subtract(&upc, threshold);
+
+ // 3) Update the errors and the black errors vectors.
+ // The last slice of the UPC array holds the MSB of the accumulated values
+ // minus the threshold. Every zero bit indicates a potential error bit.
+ // The error values are stored in the black array and XORed with the
+ // errors of the previous iteration.
+ const r_t *last_slice = &(upc.slice[SLICES - 1].u.r.val);
+ for(size_t j = 0; j < R_BYTES; j++) {
+ const uint8_t sum_msb = (~last_slice->raw[j]);
+ black_e->val[i].raw[j] = sum_msb;
+ e->val[i].raw[j] ^= sum_msb;
+ }
+
+ // Ensure that the padding bits (upper bits of the last byte) are zero so
+ // they will not be included in the multiplication and in the hash function.
+ e->val[i].raw[R_BYTES - 1] &= LAST_R_BYTE_MASK;
+
+ // 4) Calculate the gray error array by adding "DELTA" to the UPC array.
+ // For that, we reuse the rotated_syndrome variable, setting it to all "1"s.
+ for(size_t l = 0; l < DELTA; l++) {
+ bike_memset((uint8_t *)rotated_syndrome.qw, 0xff, R_BYTES);
+ ctx->bit_sliced_adder(&upc, &rotated_syndrome, SLICES);
+ }
+
+ // 5) Update the gray list with the relevant bits that are not
+ // set in the black list.
+ for(size_t j = 0; j < R_BYTES; j++) {
+ const uint8_t sum_msb = (~last_slice->raw[j]);
+ gray_e->val[i].raw[j] = (~(black_e->val[i].raw[j])) & sum_msb;
+ }
+ }
+}
+
+// Recalculate the UPCs and update the errors vector (e) according to it
+// and to the black/gray vectors.
+_INLINE_ void find_err2(OUT e_t *e,
+ IN e_t * pos_e,
+ IN const syndrome_t * syndrome,
+ IN const compressed_idx_d_ar_t wlist,
+ IN const uint8_t threshold,
+ IN const decode_ctx *ctx)
+{
+ DEFER_CLEANUP(syndrome_t rotated_syndrome = {0}, syndrome_cleanup);
+ DEFER_CLEANUP(upc_t upc, upc_cleanup);
+
+ for(uint32_t i = 0; i < N0; i++) {
+ // UPC must start from zero at every iteration
+ bike_memset(&upc, 0, sizeof(upc));
+
+ // 1) Right-rotate the syndrome, for every index of a set bit in the secret
+ // key. Then slice-add it to the UPC array.
+ for(size_t j = 0; j < DV; j++) {
+ ctx->rotate_right(&rotated_syndrome, syndrome, wlist[i].val[j]);
+ ctx->bit_sliced_adder(&upc, &rotated_syndrome, LOG2_MSB(j + 1));
+ }
+
+ // 2) Subtract the threshold from the UPC counters
+ ctx->bit_slice_full_subtract(&upc, threshold);
+
+ // 3) Update the errors vector.
+ // The last slice of the UPC array holds the MSB of the accumulated values
+ // minus the threshold. Every zero bit indicates a potential error bit.
+ const r_t *last_slice = &(upc.slice[SLICES - 1].u.r.val);
+ for(size_t j = 0; j < R_BYTES; j++) {
+ const uint8_t sum_msb = (~last_slice->raw[j]);
+ e->val[i].raw[j] ^= (pos_e->val[i].raw[j] & sum_msb);
+ }
+
+ // Ensure that the padding bits (upper bits of the last byte) are zero, so
+ // they are not included in the multiplication, and in the hash function.
+ e->val[i].raw[R_BYTES - 1] &= LAST_R_BYTE_MASK;
+ }
+}
+
+ret_t decode(OUT e_t *e, IN const ct_t *ct, IN const sk_t *sk)
+{
+ // Initialize the decode methods struct
+ decode_ctx ctx;
+ decode_ctx_init(&ctx);
+
+ DEFER_CLEANUP(e_t black_e = {0}, e_cleanup);
+ DEFER_CLEANUP(e_t gray_e = {0}, e_cleanup);
+
+ DEFER_CLEANUP(pad_r_t c0 = {0}, pad_r_cleanup);
+ DEFER_CLEANUP(pad_r_t h0 = {0}, pad_r_cleanup);
+ pad_r_t pk = {0};
+
+ // Pad ciphertext (c0), secret key (h0), and public key (h)
+ c0.val = ct->c0;
+ h0.val = sk->bin[0];
+ pk.val = sk->pk;
+
+ DEFER_CLEANUP(syndrome_t s = {0}, syndrome_cleanup);
+ DMSG(" Computing s.\n");
+ POSIX_GUARD(compute_syndrome(&s, &c0, &h0, &ctx));
+ ctx.dup(&s);
+
+ // Reset (init) the error because it is xored in the find_err functions.
+ bike_memset(e, 0, sizeof(*e));
+
+ for(uint32_t iter = 0; iter < MAX_IT; iter++) {
+ const uint8_t threshold = get_threshold(&s);
+
+ DMSG(" Iteration: %d\n", iter);
+ DMSG(" Weight of e: %lu\n",
+ r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
+ DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
+
+ find_err1(e, &black_e, &gray_e, &s, sk->wlist, threshold, &ctx);
+ POSIX_GUARD(recompute_syndrome(&s, &c0, &h0, &pk, e, &ctx));
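+
+ // In the BGF variant, the two extra find_err2 passes below (over the black
+ // and gray masks) run only in the first iteration; later iterations perform
+ // just the find_err1 step above.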
+#if defined(BGF_DECODER)
+ if(iter >= 1) {
+ continue;
+ }
+#endif
+ DMSG(" Weight of e: %lu\n",
+ r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
+ DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
+
+ find_err2(e, &black_e, &s, sk->wlist, ((DV + 1) / 2) + 1, &ctx);
+ POSIX_GUARD(recompute_syndrome(&s, &c0, &h0, &pk, e, &ctx));
+
+ DMSG(" Weight of e: %lu\n",
+ r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
+ DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
+
+ find_err2(e, &gray_e, &s, sk->wlist, ((DV + 1) / 2) + 1, &ctx);
+ POSIX_GUARD(recompute_syndrome(&s, &c0, &h0, &pk, e, &ctx));
+ }
+
+ if(r_bits_vector_weight((r_t *)s.qw) > 0) {
+ BIKE_ERROR(E_DECODING_FAILURE);
+ }
+
+ return SUCCESS;
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.h
new file mode 100644
index 0000000000..8e405ea12e
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode.h
@@ -0,0 +1,12 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "types.h"
+
+ret_t decode(OUT e_t *e, IN const ct_t *ct, IN const sk_t *sk);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx2.c
new file mode 100644
index 0000000000..ea8b91a499
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx2.c
@@ -0,0 +1,173 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ *
+ * The rotate functions are based on the Barrel shifter described in [1] and
+ * some code snippets from [2]:
+ *
+ * [1] Chou, T.: QcBits: Constant-Time Small-Key Code-Based Cryptography.
+ * In: Gier-lichs, B., Poschmann, A.Y. (eds.) Cryptographic Hardware
+ * and Embedded Systems– CHES 2016. pp. 280–300. Springer Berlin Heidelberg,
+ * Berlin, Heidelberg (2016)
+ *
+ * [2] Guimarães, Antonio, Diego F Aranha, and Edson Borin. 2019.
+ * “Optimized Implementation of QC-MDPC Code-Based Cryptography.”
+ * Concurrency and Computation: Practice and Experience 31 (18):
+ * e5089. https://doi.org/10.1002/cpe.5089.
+ */
+
+#if defined(S2N_BIKE_R3_AVX2)
+
+#include "decode.h"
+#include "decode_internal.h"
+#include "utilities.h"
+
+#define AVX2_INTERNAL
+#include "x86_64_intrinsic.h"
+
+#define R_YMM_HALF_LOG2 UPTOPOW2(R_YMM / 2)
+
+_INLINE_ void
+rotate256_big(OUT syndrome_t *out, IN const syndrome_t *in, IN size_t ymm_num)
+{
+ // For preventing overflows (comparison in bytes)
+ bike_static_assert(sizeof(*out) >
+ (BYTES_IN_YMM * (R_YMM + (2 * R_YMM_HALF_LOG2))),
+ rotr_big_err);
+
+ *out = *in;
+
+ for(uint32_t idx = R_YMM_HALF_LOG2; idx >= 1; idx >>= 1) {
+ const uint8_t mask = secure_l32_mask(ymm_num, idx);
+ const __m256i blend_mask = SET1_I8(mask);
+ ymm_num = ymm_num - (idx & mask);
+
+ for(size_t i = 0; i < (R_YMM + idx); i++) {
+ __m256i a = LOAD(&out->qw[4 * (i + idx)]);
+ __m256i b = LOAD(&out->qw[4 * i]);
+ b = BLENDV_I8(b, a, blend_mask);
+ STORE(&out->qw[4 * i], b);
+ }
+ }
+}
+
+_INLINE_ void
+rotate256_small(OUT syndrome_t *out, IN const syndrome_t *in, size_t count)
+{
+ __m256i carry_in = SET_ZERO;
+ const int count64 = (int)count & 0x3f;
+ const uint64_t count_mask = (count >> 5) & 0xe;
+
+ __m256i idx = SET_I32(7, 6, 5, 4, 3, 2, 1, 0);
+ const __m256i zero_mask = SET_I64(-1, -1, -1, 0);
+ const __m256i count_vet = SET1_I8(count_mask);
+
+ ALIGN(ALIGN_BYTES)
+ const uint8_t zero_mask2_buf[] = {
+ 0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x84, 0x84, 0x84,
+ 0x84, 0x84, 0x84, 0x84, 0x84, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82,
+ 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80};
+ __m256i zero_mask2 = LOAD(zero_mask2_buf);
+
+ zero_mask2 = SUB_I8(zero_mask2, count_vet);
+ idx = ADD_I8(idx, count_vet);
+
+ for(int i = R_YMM; i >= 0; i--) {
+ // Load the next 256 bits
+ __m256i in256 = LOAD(&in->qw[4 * i]);
+
+ // Rotate the current and previous 256 registers so that their quadwords
+ // would be in the right positions.
+ __m256i carry_out = PERMVAR_I32(in256, idx);
+ in256 = BLENDV_I8(carry_in, carry_out, zero_mask2);
+
+ // Shift less than 64 (quadwords internal)
+ __m256i inner_carry = BLENDV_I8(carry_in, in256, zero_mask);
+ inner_carry = PERM_I64(inner_carry, 0x39);
+ const __m256i out256 =
+ SRLI_I64(in256, count64) | SLLI_I64(inner_carry, (int)64 - count64);
+
+ // Store the rotated value
+ STORE(&out->qw[4 * i], out256);
+ carry_in = carry_out;
+ }
+}
+
+void rotate_right_avx2(OUT syndrome_t *out,
+ IN const syndrome_t *in,
+ IN const uint32_t bitscount)
+{
+ // 1) Rotate in granularity of 256 bits blocks, using YMMs
+ rotate256_big(out, in, (bitscount / BITS_IN_YMM));
+ // 2) Rotate in smaller granularity (less than 256 bits), using YMMs
+ rotate256_small(out, out, (bitscount % BITS_IN_YMM));
+}
+
+// Duplicates the first R_BITS of the syndrome three times
+// |------------------------------------------|
+// | Third copy | Second copy | first R_BITS |
+// |------------------------------------------|
+// This is required by the rotate functions.
+void dup_avx2(IN OUT syndrome_t *s)
+{
+ s->qw[R_QWORDS - 1] =
+ (s->qw[0] << LAST_R_QWORD_LEAD) | (s->qw[R_QWORDS - 1] & LAST_R_QWORD_MASK);
+
+ for(size_t i = 0; i < (2 * R_QWORDS) - 1; i++) {
+ s->qw[R_QWORDS + i] =
+ (s->qw[i] >> LAST_R_QWORD_TRAIL) | (s->qw[i + 1] << LAST_R_QWORD_LEAD);
+ }
+}
+
+// Use half-adder as described in [1].
+void bit_sliced_adder_avx2(OUT upc_t *upc,
+ IN OUT syndrome_t *rotated_syndrome,
+ IN const size_t num_of_slices)
+{
+ // From cache-memory perspective this loop should be the outside loop
+ for(size_t j = 0; j < num_of_slices; j++) {
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ const uint64_t carry = (upc->slice[j].u.qw[i] & rotated_syndrome->qw[i]);
+ upc->slice[j].u.qw[i] ^= rotated_syndrome->qw[i];
+ rotated_syndrome->qw[i] = carry;
+ }
+ }
+}
+
+void bit_slice_full_subtract_avx2(OUT upc_t *upc, IN uint8_t val)
+{
+ // Borrow
+ uint64_t br[R_QWORDS] = {0};
+
+ for(size_t j = 0; j < SLICES; j++) {
+
+ const uint64_t lsb_mask = 0 - (val & 0x1);
+ val >>= 1;
+
+ // Perform a - b with c as the input/output carry
+ // br = 0 0 0 0 1 1 1 1
+ // a = 0 0 1 1 0 0 1 1
+ // b = 0 1 0 1 0 1 0 1
+ // -------------------
+ // o = 0 1 1 0 0 1 1 1
+ // c = 0 1 0 0 1 1 0 1
+ //
+ // o = a^b^c
+ // new br = (~a & b & ~c) + (~a & ~b & c) + (~a & b & c) + (a & b & c)
+ //        = (~a & b & ~c) + ((~a | b) & c)
+
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ const uint64_t a = upc->slice[j].u.qw[i];
+ const uint64_t b = lsb_mask;
+ const uint64_t tmp = ((~a) & b & (~br[i])) | ((((~a) | b) & br[i]));
+ upc->slice[j].u.qw[i] = a ^ b ^ br[i];
+ br[i] = tmp;
+ }
+ }
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx512.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx512.c
new file mode 100644
index 0000000000..ef7f6d29d5
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_avx512.c
@@ -0,0 +1,167 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ *
+ * The rotation functions are based on the Barrel shifter described in [1]
+ * and some modified snippets from [2]:
+ * [1] Chou, T.: QcBits: Constant-Time Small-Key Code-Based Cryptography.
+ * In: Gier-lichs, B., Poschmann, A.Y. (eds.) Cryptographic Hardware
+ * and Embedded Systems– CHES 2016. pp. 280–300. Springer Berlin Heidelberg,
+ * Berlin, Heidelberg (2016)
+ *
+ * [2] Guimarães, Antonio, Diego F Aranha, and Edson Borin. 2019.
+ * “Optimized Implementation of QC-MDPC Code-Based Cryptography.”
+ * Concurrency and Computation: Practice and Experience 31 (18):
+ * e5089. https://doi.org/10.1002/cpe.5089.
+ */
+
+#if defined(S2N_BIKE_R3_AVX512)
+
+#include "decode.h"
+#include "decode_internal.h"
+#include "utilities.h"
+
+#define AVX512_INTERNAL
+#include "x86_64_intrinsic.h"
+
+#define R_ZMM_HALF_LOG2 UPTOPOW2(R_ZMM / 2)
+
+_INLINE_ void
+rotate512_big(OUT syndrome_t *out, IN const syndrome_t *in, size_t zmm_num)
+{
+ // For preventing overflows (comparison in bytes)
+ bike_static_assert(sizeof(*out) >
+ (BYTES_IN_ZMM * (R_ZMM + (2 * R_ZMM_HALF_LOG2))),
+ rotr_big_err);
+ *out = *in;
+
+ for(uint32_t idx = R_ZMM_HALF_LOG2; idx >= 1; idx >>= 1) {
+ const uint8_t mask = secure_l32_mask(zmm_num, idx);
+ zmm_num = zmm_num - (idx & mask);
+
+ for(size_t i = 0; i < (R_ZMM + idx); i++) {
+ const __m512i a = LOAD(&out->qw[8 * (i + idx)]);
+ MSTORE(&out->qw[8 * i], mask, a);
+ }
+ }
+}
+
+// The rotate512_small function is a derivative of the code described in [1]
+_INLINE_ void
+rotate512_small(OUT syndrome_t *out, IN const syndrome_t *in, size_t bitscount)
+{
+ __m512i previous = SET_ZERO;
+ const int count64 = (int)bitscount & 0x3f;
+ const __m512i count64_512 = SET1_I64(count64);
+ const __m512i count64_512r = SET1_I64((int)64 - count64);
+
+ const __m512i num_full_qw = SET1_I64(bitscount >> 6);
+ const __m512i one = SET1_I64(1);
+ __m512i a0, a1;
+
+ __m512i idx = SET_I64(7, 6, 5, 4, 3, 2, 1, 0);
+
+ // Positions above 7 are taken from the second register in
+ // _mm512_permutex2var_epi64
+ idx = ADD_I64(idx, num_full_qw);
+ __m512i idx1 = ADD_I64(idx, one);
+
+ for(int i = R_ZMM; i >= 0; i--) {
+ // Load the next 512 bits
+ const __m512i in512 = LOAD(&in->qw[8 * i]);
+
+ // Rotate the current and previous 512 registers so that their quadwords
+ // would be in the right positions.
+ a0 = PERMX2VAR_I64(in512, idx, previous);
+ a1 = PERMX2VAR_I64(in512, idx1, previous);
+
+ a0 = SRLV_I64(a0, count64_512);
+ a1 = SLLV_I64(a1, count64_512r);
+
+ // Shift less than 64 (quadwords internal)
+ const __m512i out512 = a0 | a1;
+
+ // Store the rotated value
+ STORE(&out->qw[8 * i], out512);
+ previous = in512;
+ }
+}
+
+void rotate_right_avx512(OUT syndrome_t *out,
+ IN const syndrome_t *in,
+ IN const uint32_t bitscount)
+{
+ // 1) Rotate in granularity of 512 bits blocks, using ZMMs
+ rotate512_big(out, in, (bitscount / BITS_IN_ZMM));
+ // 2) Rotate in smaller granularity (less than 512 bits), using ZMMs
+ rotate512_small(out, out, (bitscount % BITS_IN_ZMM));
+}
+
+// Duplicates the first R_BITS of the syndrome three times
+// |------------------------------------------|
+// | Third copy | Second copy | first R_BITS |
+// |------------------------------------------|
+// This is required by the rotate functions.
+void dup_avx512(IN OUT syndrome_t *s)
+{
+ s->qw[R_QWORDS - 1] =
+ (s->qw[0] << LAST_R_QWORD_LEAD) | (s->qw[R_QWORDS - 1] & LAST_R_QWORD_MASK);
+
+ for(size_t i = 0; i < (2 * R_QWORDS) - 1; i++) {
+ s->qw[R_QWORDS + i] =
+ (s->qw[i] >> LAST_R_QWORD_TRAIL) | (s->qw[i + 1] << LAST_R_QWORD_LEAD);
+ }
+}
+
+// Use half-adder as described in [1].
+void bit_sliced_adder_avx512(OUT upc_t *upc,
+ IN OUT syndrome_t *rotated_syndrome,
+ IN const size_t num_of_slices)
+{
+ // From cache-memory perspective this loop should be the outside loop
+ for(size_t j = 0; j < num_of_slices; j++) {
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ const uint64_t carry = (upc->slice[j].u.qw[i] & rotated_syndrome->qw[i]);
+ upc->slice[j].u.qw[i] ^= rotated_syndrome->qw[i];
+ rotated_syndrome->qw[i] = carry;
+ }
+ }
+}
+
+void bit_slice_full_subtract_avx512(OUT upc_t *upc, IN uint8_t val)
+{
+ // Borrow
+ uint64_t br[R_QWORDS] = {0};
+
+ for(size_t j = 0; j < SLICES; j++) {
+
+ const uint64_t lsb_mask = 0 - (val & 0x1);
+ val >>= 1;
+
+ // Perform a - b with c as the input/output carry
+ // br = 0 0 0 0 1 1 1 1
+ // a = 0 0 1 1 0 0 1 1
+ // b = 0 1 0 1 0 1 0 1
+ // -------------------
+ // o = 0 1 1 0 0 1 1 1
+ // c = 0 1 0 0 1 1 0 1
+ //
+ // o = a^b^c
+ // new br = (~a & b & ~c) + (~a & ~b & c) + (~a & b & c) + (a & b & c)
+ //        = (~a & b & ~c) + ((~a | b) & c)
+
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ const uint64_t a = upc->slice[j].u.qw[i];
+ const uint64_t b = lsb_mask;
+ const uint64_t tmp = ((~a) & b & (~br[i])) | ((((~a) | b) & br[i]));
+ upc->slice[j].u.qw[i] = a ^ b ^ br[i];
+ br[i] = tmp;
+ }
+ }
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_internal.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_internal.h
new file mode 100644
index 0000000000..817cc4603a
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_internal.h
@@ -0,0 +1,86 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "pq-crypto/s2n_pq.h"
+#include "defs.h"
+#include "types.h"
+
+// Rotate right the first R_BITS of a syndrome.
+// On input, the syndrome is stored as a triplicate of its first R_BITS
+// (this makes the rotation easier to implement).
+// On output, only the first R_BITS of the rotated syndrome are valid; the
+// remaining (2 * R_BITS) bits are undefined.
+void rotate_right_port(OUT syndrome_t *out,
+ IN const syndrome_t *in,
+ IN uint32_t bitscount);
+void dup_port(IN OUT syndrome_t *s);
+void bit_sliced_adder_port(OUT upc_t *upc,
+ IN OUT syndrome_t *rotated_syndrome,
+ IN const size_t num_of_slices);
+void bit_slice_full_subtract_port(OUT upc_t *upc, IN uint8_t val);
+
+#if defined(S2N_BIKE_R3_AVX2)
+void rotate_right_avx2(OUT syndrome_t *out,
+ IN const syndrome_t *in,
+ IN uint32_t bitscount);
+void dup_avx2(IN OUT syndrome_t *s);
+void bit_sliced_adder_avx2(OUT upc_t *upc,
+ IN OUT syndrome_t *rotated_syndrome,
+ IN const size_t num_of_slices);
+void bit_slice_full_subtract_avx2(OUT upc_t *upc, IN uint8_t val);
+#endif
+
+#if defined(S2N_BIKE_R3_AVX512)
+void rotate_right_avx512(OUT syndrome_t *out,
+ IN const syndrome_t *in,
+ IN uint32_t bitscount);
+void dup_avx512(IN OUT syndrome_t *s);
+void bit_sliced_adder_avx512(OUT upc_t *upc,
+ IN OUT syndrome_t *rotated_syndrome,
+ IN const size_t num_of_slices);
+void bit_slice_full_subtract_avx512(OUT upc_t *upc, IN uint8_t val);
+#endif
+
+// Decode methods struct
+typedef struct decode_ctx_st {
+ void (*rotate_right)(OUT syndrome_t *out,
+ IN const syndrome_t *in,
+ IN uint32_t bitscount);
+ void (*dup)(IN OUT syndrome_t *s);
+ void (*bit_sliced_adder)(OUT upc_t *upc,
+ IN OUT syndrome_t *rotated_syndrom,
+ IN const size_t num_of_slices);
+ void (*bit_slice_full_subtract)(OUT upc_t *upc, IN uint8_t val);
+} decode_ctx;
+
+_INLINE_ void decode_ctx_init(decode_ctx *ctx)
+{
+#if defined(S2N_BIKE_R3_AVX512)
+ if(s2n_bike_r3_is_avx512_enabled()) {
+ ctx->rotate_right = rotate_right_avx512;
+ ctx->dup = dup_avx512;
+ ctx->bit_sliced_adder = bit_sliced_adder_avx512;
+ ctx->bit_slice_full_subtract = bit_slice_full_subtract_avx512;
+ } else
+#endif
+#if defined(S2N_BIKE_R3_AVX2)
+ if(s2n_bike_r3_is_avx2_enabled()) {
+ ctx->rotate_right = rotate_right_avx2;
+ ctx->dup = dup_avx2;
+ ctx->bit_sliced_adder = bit_sliced_adder_avx2;
+ ctx->bit_slice_full_subtract = bit_slice_full_subtract_avx2;
+ } else
+#endif
+ {
+ ctx->rotate_right = rotate_right_port;
+ ctx->dup = dup_port;
+ ctx->bit_sliced_adder = bit_sliced_adder_port;
+ ctx->bit_slice_full_subtract = bit_slice_full_subtract_port;
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_portable.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_portable.c
new file mode 100644
index 0000000000..846818386d
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/decode_portable.c
@@ -0,0 +1,126 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include "decode.h"
+#include "decode_internal.h"
+#include "utilities.h"
+
+#define R_QWORDS_HALF_LOG2 UPTOPOW2(R_QWORDS / 2)
+
+_INLINE_ void
+rotr_big(OUT syndrome_t *out, IN const syndrome_t *in, IN size_t qw_num)
+{
+ // For preventing overflows (comparison in bytes)
+ bike_static_assert(sizeof(*out) > 8 * (R_QWORDS + (2 * R_QWORDS_HALF_LOG2)),
+ rotr_big_err);
+
+ *out = *in;
+
+ for(uint32_t idx = R_QWORDS_HALF_LOG2; idx >= 1; idx >>= 1) {
+ // Convert 32 bit mask to 64 bit mask
+ const uint64_t mask = ((uint32_t)secure_l32_mask(qw_num, idx) + 1U) - 1ULL;
+ qw_num = qw_num - (idx & u64_barrier(mask));
+
+ // Rotate R_QWORDS quadwords and another idx quadwords,
+ // as needed by the next iteration.
+ for(size_t i = 0; i < (R_QWORDS + idx); i++) {
+ out->qw[i] = (out->qw[i] & u64_barrier(~mask)) |
+ (out->qw[i + idx] & u64_barrier(mask));
+ }
+ }
+}
+
+_INLINE_ void
+rotr_small(OUT syndrome_t *out, IN const syndrome_t *in, IN const size_t bits)
+{
+ bike_static_assert(bits < 64, rotr_small_err);
+ bike_static_assert(sizeof(*out) > (8 * R_QWORDS), rotr_small_qw_err);
+
+ // Convert |bits| to 0/1 by using !!bits; then create a mask of 0 or
+ // 0xffffffffffffffff. Use high_shift to avoid undefined behaviour when
+ // computing x << 64.
+ const uint64_t mask = (0 - (!!bits));
+ const uint64_t high_shift = (64 - bits) & u64_barrier(mask);
+
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ const uint64_t low_part = in->qw[i] >> bits;
+ const uint64_t high_part = (in->qw[i + 1] << high_shift) & u64_barrier(mask);
+ out->qw[i] = low_part | high_part;
+ }
+}
+
+void rotate_right_port(OUT syndrome_t *out,
+ IN const syndrome_t *in,
+ IN const uint32_t bitscount)
+{
+ // Rotate (64-bit) quad-words
+ rotr_big(out, in, (bitscount / 64));
+ // Rotate bits (less than 64)
+ rotr_small(out, out, (bitscount % 64));
+}
+
+// Duplicates the first R_BITS of the syndrome three times
+// |------------------------------------------|
+// | Third copy | Second copy | first R_BITS |
+// |------------------------------------------|
+// This is required by the rotate functions.
+void dup_port(IN OUT syndrome_t *s)
+{
+ s->qw[R_QWORDS - 1] =
+ (s->qw[0] << LAST_R_QWORD_LEAD) | (s->qw[R_QWORDS - 1] & LAST_R_QWORD_MASK);
+
+ for(size_t i = 0; i < (2 * R_QWORDS) - 1; i++) {
+ s->qw[R_QWORDS + i] =
+ (s->qw[i] >> LAST_R_QWORD_TRAIL) | (s->qw[i + 1] << LAST_R_QWORD_LEAD);
+ }
+}
+
+// Use half-adder as described in [1].
+void bit_sliced_adder_port(OUT upc_t *upc,
+ IN OUT syndrome_t *rotated_syndrome,
+ IN const size_t num_of_slices)
+{
+ // From cache-memory perspective this loop should be the outside loop
+ for(size_t j = 0; j < num_of_slices; j++) {
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ const uint64_t carry = (upc->slice[j].u.qw[i] & rotated_syndrome->qw[i]);
+ upc->slice[j].u.qw[i] ^= rotated_syndrome->qw[i];
+ rotated_syndrome->qw[i] = carry;
+ }
+ }
+}
+
+void bit_slice_full_subtract_port(OUT upc_t *upc, IN uint8_t val)
+{
+ // Borrow
+ uint64_t br[R_QWORDS] = {0};
+
+ for(size_t j = 0; j < SLICES; j++) {
+
+ const uint64_t lsb_mask = 0 - (val & 0x1);
+ val >>= 1;
+
+ // Perform a - b with c as the input/output carry
+ // br = 0 0 0 0 1 1 1 1
+ // a = 0 0 1 1 0 0 1 1
+ // b = 0 1 0 1 0 1 0 1
+ // -------------------
+ // o = 0 1 1 0 0 1 1 1
+ // c = 0 1 0 0 1 1 0 1
+ //
+ // o = a^b^c
+ // new br = (~a & b & ~c) + (~a & ~b & c) + (~a & b & c) + (a & b & c)
+ //        = (~a & b & ~c) + ((~a | b) & c)
+
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ const uint64_t a = upc->slice[j].u.qw[i];
+ const uint64_t b = lsb_mask;
+ const uint64_t tmp = ((~a) & b & (~br[i])) | ((((~a) | b) & br[i]));
+ upc->slice[j].u.qw[i] = a ^ b ^ br[i];
+ br[i] = tmp;
+ }
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/defs.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/defs.h
new file mode 100644
index 0000000000..ab3f5c7a32
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/defs.h
@@ -0,0 +1,107 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+////////////////////////////////////////////
+// Basic defs
+///////////////////////////////////////////
+
+// For code clarity.
+#define IN
+#define OUT
+
+#define ALIGN(n) __attribute__((aligned(n)))
+#define BIKE_UNUSED_ATT __attribute__((unused))
+
+#define _INLINE_ static inline
+
+// In asm the symbols '==' and '?' are not allowed. Therefore, when using
+// divide_and_ceil in asm files, we must verify its validity with a static_assert.
+#if(__cplusplus >= 201103L) || defined(static_assert)
+# define bike_static_assert(COND, MSG) static_assert(COND, "MSG")
+#else
+# define bike_static_assert(COND, MSG) \
+ typedef char static_assertion_##MSG[(COND) ? 1 : -1] BIKE_UNUSED_ATT
+#endif
+
+// Divide by the divider and round up to next integer
+#define DIVIDE_AND_CEIL(x, divider) (((x) + (divider) - 1) / (divider))
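+// For example, DIVIDE_AND_CEIL(10, 4) evaluates to 3.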
+
+// Bit manipulations
+// Linux assemblers, except for Ubuntu's, cannot understand what ULL means.
+// Therefore, in that case len must be smaller than 31.
+#define BIT(len) (1ULL << (len))
+#define MASK(len) (BIT(len) - 1)
+#define SIZEOF_BITS(b) (sizeof(b) * 8)
+
+#define BYTES_IN_QWORD 0x8
+#define BYTES_IN_XMM 0x10
+#define BYTES_IN_YMM 0x20
+#define BYTES_IN_ZMM 0x40
+
+#define BITS_IN_YMM (BYTES_IN_YMM * 8)
+#define BITS_IN_ZMM (BYTES_IN_ZMM * 8)
+
+#define WORDS_IN_YMM (BYTES_IN_YMM / sizeof(uint16_t))
+#define WORDS_IN_ZMM (BYTES_IN_ZMM / sizeof(uint16_t))
+
+#define QWORDS_IN_XMM (BYTES_IN_XMM / sizeof(uint64_t))
+#define QWORDS_IN_YMM (BYTES_IN_YMM / sizeof(uint64_t))
+#define QWORDS_IN_ZMM (BYTES_IN_ZMM / sizeof(uint64_t))
+
+// Copied from Kaz's answer at
+// https://stackoverflow.com/questions/466204/rounding-up-to-next-power-of-2
+#define UPTOPOW2_0(v) ((v)-1)
+#define UPTOPOW2_1(v) (UPTOPOW2_0(v) | (UPTOPOW2_0(v) >> 1))
+#define UPTOPOW2_2(v) (UPTOPOW2_1(v) | (UPTOPOW2_1(v) >> 2))
+#define UPTOPOW2_3(v) (UPTOPOW2_2(v) | (UPTOPOW2_2(v) >> 4))
+#define UPTOPOW2_4(v) (UPTOPOW2_3(v) | (UPTOPOW2_3(v) >> 8))
+#define UPTOPOW2_5(v) (UPTOPOW2_4(v) | (UPTOPOW2_4(v) >> 16))
+
+#define UPTOPOW2(v) (UPTOPOW2_5(v) + 1)
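+// For example, UPTOPOW2(100) evaluates to 128.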
+
+// Works only for 0 < v < 512
+#define LOG2_MSB(v) \
+ ((v) == 0 \
+ ? 0 \
+ : ((v) < 2 \
+ ? 1 \
+ : ((v) < 4 \
+ ? 2 \
+ : ((v) < 8 \
+ ? 3 \
+ : ((v) < 16 \
+ ? 4 \
+ : ((v) < 32 \
+ ? 5 \
+ : ((v) < 64 \
+ ? 6 \
+ : ((v) < 128 ? 7 \
+ : ((v) < 256 ? 8 : 9)))))))))
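+
+// For example, LOG2_MSB(71) = 7, since 64 <= 71 < 128.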
+
+////////////////////////////////////////////
+// Debug
+///////////////////////////////////////////
+
+#if defined(VERBOSE)
+# include <stdio.h>
+
+# define DMSG(...) \
+ { \
+ printf(__VA_ARGS__); \
+ }
+#else
+# define DMSG(...)
+#endif
+
+////////////////////////////////////////////
+// Printing
+///////////////////////////////////////////
+//#define PRINT_IN_BE
+//#define NO_SPACE
+//#define NO_NEWLINE
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.c
new file mode 100644
index 0000000000..9f779b7df9
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.c
@@ -0,0 +1,10 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include "error.h"
+
+__thread _bike_err_t bike_errno;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.h
new file mode 100644
index 0000000000..b1b9db6d5e
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/error.h
@@ -0,0 +1,33 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "utils/s2n_safety.h"
+
+#define SUCCESS 0
+#define FAIL (-1)
+
+#define ret_t int __attribute__((warn_unused_result))
+
+enum _bike_err
+{
+ E_DECODING_FAILURE = 1,
+ E_AES_CTR_PRF_INIT_FAIL = 2,
+ E_AES_OVER_USED = 3,
+ EXTERNAL_LIB_ERROR_OPENSSL = 4,
+ E_FAIL_TO_GET_SEED = 5
+};
+
+typedef enum _bike_err _bike_err_t;
+
+extern __thread _bike_err_t bike_errno;
+#define BIKE_ERROR(x) \
+ do { \
+ bike_errno = (x); \
+ return FAIL; \
+ } while(0)
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x.h
new file mode 100644
index 0000000000..f4cdb53a80
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x.h
@@ -0,0 +1,29 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "types.h"
+
+// c = a+b mod (x^r - 1)
+_INLINE_ void
+gf2x_mod_add(OUT pad_r_t *c, IN const pad_r_t *a, IN const pad_r_t *b)
+{
+ const uint64_t *a_qwords = (const uint64_t *)a;
+ const uint64_t *b_qwords = (const uint64_t *)b;
+ uint64_t * c_qwords = (uint64_t *)c;
+
+ for(size_t i = 0; i < R_PADDED_QWORDS; i++) {
+ c_qwords[i] = a_qwords[i] ^ b_qwords[i];
+ }
+}
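+
+// Note: addition in GF(2)[x] is coefficient-wise XOR, and deg(a + b) never
+// exceeds max(deg(a), deg(b)), so no explicit reduction modulo (x^r - 1) is
+// needed here.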
+
+// c = a*b mod (x^r - 1)
+void gf2x_mod_mul(OUT pad_r_t *c, IN const pad_r_t *a, IN const pad_r_t *b);
+
+// c = a^-1 mod (x^r - 1)
+void gf2x_mod_inv(OUT pad_r_t *c, IN const pad_r_t *a);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_internal.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_internal.h
new file mode 100644
index 0000000000..a87478aba1
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_internal.h
@@ -0,0 +1,177 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+// For size_t
+#include <stdlib.h>
+
+#include "pq-crypto/s2n_pq.h"
+#include "types.h"
+
+// The size in quadwords of the operands in the gf2x_mul_base function
+// for different implementations.
+#define GF2X_PORT_BASE_QWORDS (1)
+#define GF2X_PCLMUL_BASE_QWORDS (8)
+#define GF2X_VPCLMUL_BASE_QWORDS (16)
+
+// ------------------ FUNCTIONS NEEDED FOR GF2X MULTIPLICATION ------------------
+// GF2X multiplication of a and b of size GF2X_BASE_QWORDS, c = a * b
+void gf2x_mul_base_port(OUT uint64_t *c,
+ IN const uint64_t *a,
+ IN const uint64_t *b);
+void karatzuba_add1_port(OUT uint64_t *alah,
+ OUT uint64_t *blbh,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const size_t qwords_len);
+void karatzuba_add2_port(OUT uint64_t *z,
+ IN const uint64_t *x,
+ IN const uint64_t *y,
+ IN const size_t qwords_len);
+void karatzuba_add3_port(OUT uint64_t *c,
+ IN const uint64_t *mid,
+ IN const size_t qwords_len);
+
+// -------------------- FUNCTIONS NEEDED FOR GF2X INVERSION --------------------
+// c = a^2
+void gf2x_sqr_port(OUT dbl_pad_r_t *c, IN const pad_r_t *a);
+// The k-squaring function computes c = a^(2^k) % (x^r - 1),
+// It is required by inversion, where l_param is derived from k.
+void k_sqr_port(OUT pad_r_t *c, IN const pad_r_t *a, IN size_t l_param);
+// c = a mod (x^r - 1)
+void gf2x_red_port(OUT pad_r_t *c, IN const dbl_pad_r_t *a);
+
+// AVX2 versions of the functions
+#if defined(S2N_BIKE_R3_AVX2)
+void karatzuba_add1_avx2(OUT uint64_t *alah,
+ OUT uint64_t *blbh,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const size_t qwords_len);
+void karatzuba_add2_avx2(OUT uint64_t *z,
+ IN const uint64_t *x,
+ IN const uint64_t *y,
+ IN const size_t qwords_len);
+void karatzuba_add3_avx2(OUT uint64_t *c,
+ IN const uint64_t *mid,
+ IN const size_t qwords_len);
+void k_sqr_avx2(OUT pad_r_t *c, IN const pad_r_t *a, IN size_t l_param);
+void gf2x_red_avx2(OUT pad_r_t *c, IN const dbl_pad_r_t *a);
+#endif
+
+// AVX512 versions of the functions
+#if defined(S2N_BIKE_R3_AVX512)
+void karatzuba_add1_avx512(OUT uint64_t *alah,
+ OUT uint64_t *blbh,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const size_t qwords_len);
+void karatzuba_add2_avx512(OUT uint64_t *z,
+ IN const uint64_t *x,
+ IN const uint64_t *y,
+ IN const size_t qwords_len);
+void karatzuba_add3_avx512(OUT uint64_t *c,
+ IN const uint64_t *mid,
+ IN const size_t qwords_len);
+void k_sqr_avx512(OUT pad_r_t *c, IN const pad_r_t *a, IN size_t l_param);
+void gf2x_red_avx512(OUT pad_r_t *c, IN const dbl_pad_r_t *a);
+#endif
+
+// PCLMUL based multiplication
+#if defined(S2N_BIKE_R3_PCLMUL)
+void gf2x_mul_base_pclmul(OUT uint64_t *c,
+ IN const uint64_t *a,
+ IN const uint64_t *b);
+void gf2x_sqr_pclmul(OUT dbl_pad_r_t *c, IN const pad_r_t *a);
+#endif
+
+// VPCLMUL based multiplication
+#if defined(S2N_BIKE_R3_VPCLMUL)
+void gf2x_mul_base_vpclmul(OUT uint64_t *c,
+ IN const uint64_t *a,
+ IN const uint64_t *b);
+void gf2x_sqr_vpclmul(OUT dbl_pad_r_t *c, IN const pad_r_t *a);
+#endif
+
+// GF2X methods struct
+typedef struct gf2x_ctx_st {
+ size_t mul_base_qwords;
+ void (*mul_base)(OUT uint64_t *c, IN const uint64_t *a, IN const uint64_t *b);
+ void (*karatzuba_add1)(OUT uint64_t *alah,
+ OUT uint64_t *blbh,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const size_t qwords_len);
+ void (*karatzuba_add2)(OUT uint64_t *z,
+ IN const uint64_t *x,
+ IN const uint64_t *y,
+ IN const size_t qwords_len);
+ void (*karatzuba_add3)(OUT uint64_t *c,
+ IN const uint64_t *mid,
+ IN const size_t qwords_len);
+
+ void (*sqr)(OUT dbl_pad_r_t *c, IN const pad_r_t *a);
+ void (*k_sqr)(OUT pad_r_t *c, IN const pad_r_t *a, IN size_t l_param);
+
+ void (*red)(OUT pad_r_t *c, IN const dbl_pad_r_t *a);
+} gf2x_ctx;
+
+// Used in gf2x_inv.c to avoid initializing the context many times.
+void gf2x_mod_mul_with_ctx(OUT pad_r_t *c,
+ IN const pad_r_t *a,
+ IN const pad_r_t *b,
+ IN const gf2x_ctx *ctx);
+
+_INLINE_ void gf2x_ctx_init(gf2x_ctx *ctx)
+{
+#if defined(S2N_BIKE_R3_AVX512)
+ if(s2n_bike_r3_is_avx512_enabled()) {
+ ctx->karatzuba_add1 = karatzuba_add1_avx512;
+ ctx->karatzuba_add2 = karatzuba_add2_avx512;
+ ctx->karatzuba_add3 = karatzuba_add3_avx512;
+ ctx->k_sqr = k_sqr_avx512;
+ ctx->red = gf2x_red_avx512;
+ } else
+#endif
+#if defined(S2N_BIKE_R3_AVX2)
+ if(s2n_bike_r3_is_avx2_enabled()) {
+ ctx->karatzuba_add1 = karatzuba_add1_avx2;
+ ctx->karatzuba_add2 = karatzuba_add2_avx2;
+ ctx->karatzuba_add3 = karatzuba_add3_avx2;
+ ctx->k_sqr = k_sqr_avx2;
+ ctx->red = gf2x_red_avx2;
+ } else
+#endif
+ {
+ ctx->karatzuba_add1 = karatzuba_add1_port;
+ ctx->karatzuba_add2 = karatzuba_add2_port;
+ ctx->karatzuba_add3 = karatzuba_add3_port;
+ ctx->k_sqr = k_sqr_port;
+ ctx->red = gf2x_red_port;
+ }
+
+#if defined(S2N_BIKE_R3_VPCLMUL)
+ if(s2n_bike_r3_is_vpclmul_enabled()) {
+ ctx->mul_base_qwords = GF2X_VPCLMUL_BASE_QWORDS;
+ ctx->mul_base = gf2x_mul_base_vpclmul;
+ ctx->sqr = gf2x_sqr_vpclmul;
+ } else
+#endif
+#if defined(S2N_BIKE_R3_PCLMUL)
+ if(s2n_bike_r3_is_pclmul_enabled()) {
+ ctx->mul_base_qwords = GF2X_PCLMUL_BASE_QWORDS;
+ ctx->mul_base = gf2x_mul_base_pclmul;
+ ctx->sqr = gf2x_sqr_pclmul;
+ } else
+#endif
+ {
+ ctx->mul_base_qwords = GF2X_PORT_BASE_QWORDS;
+ ctx->mul_base = gf2x_mul_base_port;
+ ctx->sqr = gf2x_sqr_port;
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_inv.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_inv.c
new file mode 100644
index 0000000000..bea7ee84b1
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_inv.c
@@ -0,0 +1,156 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ *
+ * The inversion algorithm in this file is based on:
+ * [1] Nir Drucker, Shay Gueron, and Dusan Kostic. 2020. "Fast polynomial
+ * inversion for post quantum QC-MDPC cryptography". Cryptology ePrint Archive,
+ * 2020. https://eprint.iacr.org/2020/298.pdf
+ */
+
+#include "cleanup.h"
+#include "gf2x.h"
+#include "gf2x_internal.h"
+
+// a = a^2 mod (x^r - 1)
+_INLINE_ void gf2x_mod_sqr_in_place(IN OUT pad_r_t *a,
+ OUT dbl_pad_r_t *secure_buffer,
+ IN const gf2x_ctx *ctx)
+{
+ ctx->sqr(secure_buffer, a);
+ ctx->red(a, secure_buffer);
+}
+
+// c = a^2^2^num_sqrs
+_INLINE_ void repeated_squaring(OUT pad_r_t *c,
+ IN pad_r_t * a,
+ IN const size_t num_sqrs,
+ OUT dbl_pad_r_t *sec_buf,
+ IN const gf2x_ctx *ctx)
+{
+ c->val = a->val;
+
+ for(size_t i = 0; i < num_sqrs; i++) {
+ gf2x_mod_sqr_in_place(c, sec_buf, ctx);
+ }
+}
+
+// The gf2x_mod_inv function implements inversion in F_2[x]/(x^R - 1)
+// based on [1](Algorithm 2).
+
+// In every iteration, [1](Algorithm 2) performs two exponentiations:
+// exponentiation 0 (exp0) and exponentiation 1 (exp1) of the form f^(2^k).
+// These exponentiations are computed either by repeated squaring of f, k times,
+// or by a single k-squaring of f. The method for a specific value of k
+// is chosen based on the performance of squaring and k-squaring.
+//
+// Benchmarks on several platforms indicate that a good threshold
+// for switching from repeated squaring to k-squaring is k = 64.
+#define K_SQR_THR (64)
+
+// k-squaring is computed by a permutation of bits of the input polynomial,
+// as defined in [1](Observation 1). The required parameter for the permutation
+// is l = (2^k)^-1 % R.
+// Therefore, there are two sets of parameters for every exponentiation:
+// - exp0_k and exp1_k
+// - exp0_l and exp1_l
+
+// Exponentiation 0 computes f^2^2^(i-1) for 0 < i < MAX_I.
+// Exponentiation 1 computes f^2^((r-2) % 2^i) for 0 < i < MAX_I,
+// only when the i-th bit of (r-2) is 1. Therefore, the value 0 in
+// exp1_k[i] and exp1_l[i] means that exp1 is skipped in i-th iteration.
+
+// To quickly generate all the required parameters in Sage:
+// r = DESIRED_R
+// max_i = floor(log(r-2, 2)) + 1
+// exp0_k = [2^i for i in range(max_i)]
+// exp0_l = [inverse_mod((2^k) % r, r) for k in exp0_k]
+// exp1_k = [(r-2)%(2^i) if ((r-2) & (1<<i)) else 0 for i in range(max_i)]
+// exp1_l = [inverse_mod((2^k) % r, r) if k != 0 else 0 for k in exp1_k]
+
+#if(LEVEL == 1)
+// The parameters below are hard-coded for R=12323
+bike_static_assert((R_BITS == 12323), gf2x_inv_r_doesnt_match_parameters);
+
+// MAX_I = floor(log(r-2)) + 1
+# define MAX_I (14)
+# define EXP0_K_VALS \
+ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192
+# define EXP0_L_VALS \
+ 6162, 3081, 3851, 5632, 22, 484, 119, 1838, 1742, 3106, 10650, 1608, 10157, \
+ 8816
+# define EXP1_K_VALS 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 33, 4129
+# define EXP1_L_VALS 0, 0, 0, 0, 0, 6162, 0, 0, 0, 0, 0, 0, 242, 5717
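+
+// Sanity check for the first entry: exp0_k[0] = 1 and
+// exp0_l[0] = inverse_mod(2^1 % 12323, 12323) = 6162, since 2 * 6162 = 12324 = R + 1.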
+
+#else
+// The parameters below are hard-coded for R=24659
+bike_static_assert((R_BITS == 24659), gf2x_inv_r_doesnt_match_parameters);
+
+// MAX_I = floor(log(r-2)) + 1
+# define MAX_I (15)
+# define EXP0_K_VALS \
+ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384
+# define EXP0_L_VALS \
+ 12330, 6165, 7706, 3564, 2711, 1139, 15053, 1258, 4388, 20524, 9538, 6393, \
+ 10486, 1715, 6804
+# define EXP1_K_VALS 0, 0, 0, 0, 1, 0, 17, 0, 0, 0, 0, 0, 0, 81, 8273
+# define EXP1_L_VALS 0, 0, 0, 0, 12330, 0, 13685, 0, 0, 0, 0, 0, 0, 23678, 19056
+
+#endif
+
+// Inversion in F_2[x]/(x^R - 1), [1](Algorithm 2).
+// c = a^{-1} mod x^r-1
+void gf2x_mod_inv(OUT pad_r_t *c, IN const pad_r_t *a)
+{
+ // Initialize gf2x methods struct
+ gf2x_ctx ctx = {0};
+ gf2x_ctx_init(&ctx);
+
+ // Note that exp0/1_k/l are predefined constants that depend only on the value
+ // of R. This value is public. Therefore, branches in this function, which
+ // depend on R, are also "public". Code that reveals these branches
+ // (taken/not-taken) does not leak secret information.
+ const size_t exp0_k[MAX_I] = {EXP0_K_VALS};
+ const size_t exp0_l[MAX_I] = {EXP0_L_VALS};
+ const size_t exp1_k[MAX_I] = {EXP1_K_VALS};
+ const size_t exp1_l[MAX_I] = {EXP1_L_VALS};
+
+ DEFER_CLEANUP(pad_r_t f = {0}, pad_r_cleanup);
+ DEFER_CLEANUP(pad_r_t g = {0}, pad_r_cleanup);
+ DEFER_CLEANUP(pad_r_t t = {0}, pad_r_cleanup);
+ DEFER_CLEANUP(dbl_pad_r_t sec_buf = {0}, dbl_pad_r_cleanup);
+
+ // Steps 2 and 3 in [1](Algorithm 2)
+ f.val = a->val;
+ t.val = a->val;
+
+ for(size_t i = 1; i < MAX_I; i++) {
+ // Step 5 in [1](Algorithm 2), exponentiation 0: g = f^2^2^(i-1)
+ if(exp0_k[i - 1] <= K_SQR_THR) {
+ repeated_squaring(&g, &f, exp0_k[i - 1], &sec_buf, &ctx);
+ } else {
+ ctx.k_sqr(&g, &f, exp0_l[i - 1]);
+ }
+
+ // Step 6, [1](Algorithm 2): f = f*g
+ gf2x_mod_mul_with_ctx(&f, &g, &f, &ctx);
+
+ if(exp1_k[i] != 0) {
+ // Step 8, [1](Algorithm 2), exponentiation 1: g = f^2^((r-2) % 2^i)
+ if(exp1_k[i] <= K_SQR_THR) {
+ repeated_squaring(&g, &f, exp1_k[i], &sec_buf, &ctx);
+ } else {
+ ctx.k_sqr(&g, &f, exp1_l[i]);
+ }
+
+ // Step 9, [1](Algorithm 2): t = t*g;
+ gf2x_mod_mul_with_ctx(&t, &g, &t, &ctx);
+ }
+ }
+
+ // Step 10, [1](Algorithm 2): c = t^2
+ gf2x_mod_sqr_in_place(&t, &sec_buf, &ctx);
+ c->val = t.val;
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx2.c
new file mode 100644
index 0000000000..91ed73d3f2
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx2.c
@@ -0,0 +1,188 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ *
+ * The k-squaring algorithm in this file is based on:
+ * [1] Nir Drucker, Shay Gueron, and Dusan Kostic. 2020. "Fast polynomial
+ * inversion for post quantum QC-MDPC cryptography". Cryptology ePrint Archive,
+ * 2020. https://eprint.iacr.org/2020/298.pdf
+ */
+
+#if defined(S2N_BIKE_R3_AVX2)
+
+#include "cleanup.h"
+#include "gf2x_internal.h"
+
+#define AVX2_INTERNAL
+#include "x86_64_intrinsic.h"
+
+#define NUM_YMMS (2)
+#define NUM_OF_VALS (NUM_YMMS * WORDS_IN_YMM)
+
+_INLINE_ void generate_map(OUT uint16_t *map, IN const uint16_t l_param)
+{
+ __m256i vmap[NUM_YMMS], vtmp[NUM_YMMS], vr, inc, zero;
+
+ // The permutation map is generated in the following way:
+ // 1. for i = 0 to map size:
+ // 2. map[i] = (i * l_param) % r
+ // However, to avoid the expensive multiplication and modulo operations
+ // we modify the algorithm to:
+ // 1. map[0] = l_param
+ // 2. for i = 1 to map size:
+ // 3. map[i] = map[i - 1] + l_param
+ // 4. if map[i] >= r:
+ // 5. map[i] = map[i] - r
+ // This algorithm is parallelized with vector instructions by processing
+ // a certain number of values (NUM_OF_VALS) in parallel. Therefore,
+ // the first NUM_OF_VALS elements are initialized up front.
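+ // As a toy illustration (with r = 13 instead of R_BITS and l_param = 5),
+ // the map would be 0, 5, 10, 2, 7, 12, 4, 9, 1, 6, 11, 3, 8:
+ // each step adds 5 and subtracts 13 whenever the running value reaches 13.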
+ for(size_t i = 0; i < NUM_OF_VALS; i++) {
+ map[i] = (i * l_param) % R_BITS;
+ }
+
+ vr = SET1_I16(R_BITS);
+ zero = SET_ZERO;
+
+ // Set the increment vector such that adding it to vmap vectors
+ // gives the next NUM_OF_VALS elements of the map. AVX2 does not
+ // support comparison of vectors where vector elements are considered
+ // as unsigned integers. This is a problem when r > 2^14 because
+ // the sum of two values can be 2^15 or larger, which would make it
+ // a negative number when considered as a signed 16-bit integer,
+ // and therefore the condition in step 4 of the algorithm would be
+ // evaluated incorrectly. So, we use the following trick:
+ // we subtract R from the increment and modify the algorithm:
+ // 1. map[0] = l_param
+ // 2. for i = 1 to map size:
+ // 3. map[i] = map[i - 1] + (l_param - r)
+ // 4. if map[i] < 0:
+ // 5. map[i] = map[i] + r
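+ // For example, with r = 24659 and an increment of 15000: a running value of
+ // 20000 would overflow if 15000 were added directly (35000 > 2^15 - 1),
+ // but adding (15000 - 24659) = -9659 gives 10341 >= 0, which is kept,
+ // and indeed (20000 + 15000) % 24659 = 10341.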
+ inc = SET1_I16((l_param * NUM_OF_VALS) % R_BITS);
+ inc = SUB_I16(inc, vr);
+
+ // Load the first NUM_OF_VALS elements in the vmap vectors
+ for(size_t i = 0; i < NUM_YMMS; i++) {
+ vmap[i] = LOAD(&map[i * WORDS_IN_YMM]);
+ }
+
+ for(size_t i = NUM_YMMS; i < (R_PADDED / WORDS_IN_YMM); i += NUM_YMMS) {
+ for(size_t j = 0; j < NUM_YMMS; j++) {
+ vmap[j] = ADD_I16(vmap[j], inc);
+ vtmp[j] = CMPGT_I16(zero, vmap[j]);
+ vmap[j] = ADD_I16(vmap[j], vtmp[j] & vr);
+
+ STORE(&map[(i + j) * WORDS_IN_YMM], vmap[j]);
+ }
+ }
+}
+
+// Convert from byte representation, where every byte holds a single bit of
+// the polynomial, to binary representation, where every byte
+// holds 8 bits of the polynomial.
+_INLINE_ void bytes_to_bin(OUT pad_r_t *bin_buf, IN const uint8_t *bytes_buf)
+{
+ uint32_t *bin32 = (uint32_t *)bin_buf;
+
+ for(size_t i = 0; i < R_QWORDS * 2; i++) {
+ __m256i t = LOAD(&bytes_buf[i * BYTES_IN_YMM]);
+ bin32[i] = MOVEMASK(t);
+ }
+}
+
+// Convert from binary representation where every byte holds 8 bits
+// of the polynomial, to byte representation where
+// every byte holds a single bit of the polynomial.
+_INLINE_ void bin_to_bytes(OUT uint8_t *bytes_buf, IN const pad_r_t *bin_buf)
+{
+ // The algorithm works by taking every 32 bits of the input and converting
+ // them to 32 bytes where each byte holds one of the bits. The first step is
+ // to broadcast a 32-bit value (call it a) to all elements of vector t.
+ // Then t contains bytes of a in the following order:
+ // t = [ a3 a2 a1 a0 ... a3 a2 a1 a0 ]
+ // where a0 contains the first 8 bits of a, a1 the second 8 bits, etc.
+ // Let the output vector be [ out31 out30 ... out0 ]. We want to store
+ // bit 0 of a in the out0 byte, bit 1 of a in the out1 byte, etc. (note that
+ // we want to store the bit in the most significant position of a byte
+ // because this is required by the MOVEMASK instruction used in bytes_to_bin.)
+ //
+ // Ideally, we would shuffle the bytes of t such that the byte in
+ // i-th position contains i-th bit of val, shift t appropriately and obtain
+ // the result. However, AVX2 doesn't support shift operation on bytes, only
+ // shifts of individual QWORDS (64 bit) and DWORDS (32 bit) are allowed.
+ // Consider the two least significant DWORDS of t:
+ // t = [ ... | a3 a2 a1 a0 | a3 a2 a1 a0 ]
+ // and shift them by 6 and 4 to the left, respectively, to obtain:
+ // t = [ ... | t7 t6 t5 t4 | t3 t2 t1 t0 ]
+ // where t3 = a3 << 6, t2 = a2 << 6, t1 = a1 << 6, t0 = a0 << 6,
+ // and t7 = a3 << 4, t6 = a2 << 4, t5 = a1 << 4, t4 = a0 << 4.
+ // Now we shuffle vector t to obtain vector p such that:
+ // p = [ ... | t12 t12 t8 t8 | t4 t4 t0 t0 ]
+ // Note that in every even position of the vector p we have the right byte
+ // of the input shifted by the required shift. The values in the odd
+ // positions contain the right bytes of the input but they need to be shifted
+ // one more time to the left by 1. By shifting each DWORD of p by 1 we get:
+ // q = [ ... | p7 p6 p5 p4 | p3 p2 p1 p0 ]
+ // where p1 = t0 << 1 = a0 << 7, p3 = t4 << 1 = a0 << 5, etc. Therefore, by
+ // blending p and q (taking even positions from p and odd positions from q)
+ // we obtain the desired result.
+
+ __m256i t, p, q;
+
+ const __m256i shift_mask = SET_I32(0, 2, 4, 6, 0, 2, 4, 6);
+
+ const __m256i shuffle_mask =
+ SET_I8(15, 15, 11, 11, 7, 7, 3, 3, 14, 14, 10, 10, 6, 6, 2, 2, 13, 13, 9, 9,
+ 5, 5, 1, 1, 12, 12, 8, 8, 4, 4, 0, 0);
+
+ const __m256i blend_mask = SET1_I16(0x00ff);
+
+ const uint32_t *bin32 = (const uint32_t *)bin_buf;
+
+ for(size_t i = 0; i < R_QWORDS * 2; i++) {
+ t = SET1_I32(bin32[i]);
+ t = SLLV_I32(t, shift_mask);
+
+ p = SHUF_I8(t, shuffle_mask);
+ q = SLLI_I32(p, 1);
+
+ STORE(&bytes_buf[i * 32], BLENDV_I8(p, q, blend_mask));
+ }
+}
+
+// The k-squaring function computes c = a^(2^k) % (x^r - 1).
+// By [1](Observation 1), if
+// a = sum_{j in supp(a)} x^j,
+// then
+// a^(2^k) % (x^r - 1) = sum_{j in supp(a)} x^((j * 2^k) % r).
+// Therefore, k-squaring can be computed as a permutation of the bits of "a":
+// pi0 : j --> (j * 2^k) % r.
+// For improved performance, we compute the result by inverted permutation pi1:
+// pi1 : (j * 2^-k) % r --> j.
+// Input argument l_param is defined as the value (2^-k) % r.
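+// As a toy illustration (with r = 13 instead of R_BITS): for k = 2,
+// l_param = (2^-2) % 13 = 10. If a = x^3 then a^4 % (x^13 - 1) = x^12, and
+// indeed bit 12 of the output is copied from bit (12 * 10) % 13 = 3 of the
+// input.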
+void k_sqr_avx2(OUT pad_r_t *c, IN const pad_r_t *a, IN const size_t l_param)
+{
+ ALIGN(ALIGN_BYTES) uint16_t map[R_PADDED];
+ ALIGN(ALIGN_BYTES) uint8_t a_bytes[R_PADDED];
+ ALIGN(ALIGN_BYTES) uint8_t c_bytes[R_PADDED] = {0};
+
+ // Generate the permutation map defined by pi1 and l_param.
+ generate_map(map, l_param);
+
+ bin_to_bytes(a_bytes, a);
+
+ // Permute "a" using the generated permutation map.
+ for(size_t i = 0; i < R_BITS; i++) {
+ c_bytes[i] = a_bytes[map[i]];
+ }
+
+ bytes_to_bin(c, c_bytes);
+
+ secure_clean(a_bytes, sizeof(a_bytes));
+ secure_clean(c_bytes, sizeof(c_bytes));
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx512.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx512.c
new file mode 100644
index 0000000000..af2c5738a8
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_avx512.c
@@ -0,0 +1,135 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ *
+ * The k-squaring algorithm in this file is based on:
+ * [1] Nir Drucker, Shay Gueron, and Dusan Kostic. 2020. "Fast polynomial
+ * inversion for post quantum QC-MDPC cryptography". Cryptology ePrint Archive,
+ * 2020. https://eprint.iacr.org/2020/298.pdf
+ */
+
+#if defined(S2N_BIKE_R3_AVX512)
+
+#include "cleanup.h"
+#include "gf2x_internal.h"
+
+#define AVX512_INTERNAL
+#include "x86_64_intrinsic.h"
+
+#define NUM_ZMMS (2)
+#define NUM_OF_VALS (NUM_ZMMS * WORDS_IN_ZMM)
+
+// clang-3.9 doesn't recognize these two macros
+#if !defined(_MM_CMPINT_EQ)
+# define _MM_CMPINT_EQ (0)
+#endif
+
+#if !defined(_MM_CMPINT_NLT)
+# define _MM_CMPINT_NLT (5)
+#endif
+
+_INLINE_ void generate_map(OUT uint16_t *map, IN const size_t l_param)
+{
+ __m512i vmap[NUM_ZMMS], vr, inc;
+ __mmask32 mask[NUM_ZMMS];
+
+ // The permutation map is generated in the following way:
+ // 1. for i = 0 to map size:
+ // 2. map[i] = (i * l_param) % r
+ // However, to avoid the expensive multiplication and modulo operations
+ // we modify the algorithm to:
+ // 1. map[0] = l_param
+ // 2. for i = 1 to map size:
+ // 3. map[i] = map[i - 1] + l_param
+ // 4. if map[i] >= r:
+ // 5. map[i] = map[i] - r
+ // This algorithm is parallelized with vector instructions by processing
+ // a certain number of values (NUM_OF_VALS) in parallel. Therefore,
+ // the first NUM_OF_VALS elements are initialized up front.
+ for(size_t i = 0; i < NUM_OF_VALS; i++) {
+ map[i] = (i * l_param) % R_BITS;
+ }
+
+ // Set the increment vector such that by adding it to vmap vectors
+ // we will obtain the next NUM_OF_VALS elements of the map.
+ inc = SET1_I16((l_param * NUM_OF_VALS) % R_BITS);
+ vr = SET1_I16(R_BITS);
+
+ // Load the first NUM_OF_VALS elements in the vmap vectors
+ for(size_t i = 0; i < NUM_ZMMS; i++) {
+ vmap[i] = LOAD(&map[i * WORDS_IN_ZMM]);
+ }
+
+ for(size_t i = NUM_ZMMS; i < (R_PADDED / WORDS_IN_ZMM); i += NUM_ZMMS) {
+ for(size_t j = 0; j < NUM_ZMMS; j++) {
+ vmap[j] = ADD_I16(vmap[j], inc);
+ mask[j] = CMPM_U16(vmap[j], vr, _MM_CMPINT_NLT);
+ vmap[j] = MSUB_I16(vmap[j], mask[j], vmap[j], vr);
+
+ STORE(&map[(i + j) * WORDS_IN_ZMM], vmap[j]);
+ }
+ }
+}
+
+// Convert from bytes representation where each byte holds a single bit
+// to binary representation where each byte holds 8 bits of the polynomial
+_INLINE_ void bytes_to_bin(OUT pad_r_t *bin_buf, IN const uint8_t *bytes_buf)
+{
+ uint64_t *bin64 = (uint64_t *)bin_buf;
+
+ __m512i first_bit_mask = SET1_I8(1);
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ __m512i t = LOAD(&bytes_buf[i * BYTES_IN_ZMM]);
+ bin64[i] = CMPM_U8(t, first_bit_mask, _MM_CMPINT_EQ);
+ }
+}
+
+// Convert from binary representation where each byte holds 8 bits
+// to byte representation where each byte holds a single bit of the polynomial
+_INLINE_ void bin_to_bytes(OUT uint8_t *bytes_buf, IN const pad_r_t *bin_buf)
+{
+ const uint64_t *bin64 = (const uint64_t *)bin_buf;
+
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ __m512i t = SET1MZ_I8(bin64[i], 1);
+ STORE(&bytes_buf[i * BYTES_IN_ZMM], t);
+ }
+}
+
+// The k-squaring function computes c = a^(2^k) % (x^r - 1).
+// By [1](Observation 1), if
+// a = sum_{j in supp(a)} x^j,
+// then
+// a^(2^k) % (x^r - 1) = sum_{j in supp(a)} x^((j * 2^k) % r).
+// Therefore, k-squaring can be computed as a permutation of the bits of "a":
+// pi0 : j --> (j * 2^k) % r.
+// For improved performance, we compute the result by inverted permutation pi1:
+// pi1 : (j * 2^-k) % r --> j.
+// Input argument l_param is defined as the value (2^-k) % r.
+void k_sqr_avx512(OUT pad_r_t *c, IN const pad_r_t *a, IN const size_t l_param)
+{
+ ALIGN(ALIGN_BYTES) uint16_t map[R_PADDED];
+ ALIGN(ALIGN_BYTES) uint8_t a_bytes[R_PADDED];
+ ALIGN(ALIGN_BYTES) uint8_t c_bytes[R_PADDED] = {0};
+
+ // Generate the permutation map defined by pi1 and l_param.
+ generate_map(map, l_param);
+
+ bin_to_bytes(a_bytes, a);
+
+ // Permute "a" using the generated permutation map.
+ for(size_t i = 0; i < R_BITS; i++) {
+ c_bytes[i] = a_bytes[map[i]];
+ }
+
+ bytes_to_bin(c, c_bytes);
+
+ secure_clean(a_bytes, sizeof(a_bytes));
+ secure_clean(c_bytes, sizeof(c_bytes));
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_portable.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_portable.c
new file mode 100644
index 0000000000..c757687f58
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_ksqr_portable.c
@@ -0,0 +1,48 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ *
+ * The k-squaring algorithm in this file is based on:
+ * [1] Nir Drucker, Shay Gueron, and Dusan Kostic. 2020. "Fast polynomial
+ * inversion for post quantum QC-MDPC cryptography". Cryptology ePrint Archive,
+ * 2020. https://eprint.iacr.org/2020/298.pdf
+ */
+
+#include "gf2x_internal.h"
+#include "utilities.h"
+
+#define BITS_IN_BYTE (8)
+
+// The k-squaring function computes c = a^(2^k) % (x^r - 1).
+// By [1](Observation 1), if
+// a = sum_{j in supp(a)} x^j,
+// then
+// a^(2^k) % (x^r - 1) = sum_{j in supp(a)} x^((j * 2^k) % r).
+// Therefore, k-squaring can be computed as a permutation of the bits of "a":
+// pi0 : j --> (j * 2^k) % r.
+// For improved performance, we compute the result by inverted permutation pi1:
+// pi1 : (j * 2^-k) % r --> j.
+// Input argument l_param is defined as the value (2^-k) % r.
+void k_sqr_port(OUT pad_r_t *c, IN const pad_r_t *a, IN const size_t l_param)
+{
+ bike_memset(c->val.raw, 0, sizeof(c->val));
+
+ // Compute the result byte by byte
+ size_t idx = 0;
+ for(size_t i = 0; i < R_BYTES; i++) {
+ for(size_t j = 0; j < BITS_IN_BYTE; j++, idx++) {
+ // Bit of "c" at position idx is set to the value of
+ // the bit of "a" at position pi1(idx) = (l_param * idx) % R_BITS.
+ size_t pos = (l_param * idx) % R_BITS;
+
+ size_t pos_byte = pos >> 3;
+ size_t pos_bit = pos & 7;
+ uint8_t bit = (a->val.raw[pos_byte] >> pos_bit) & 1;
+
+ c->val.raw[i] |= (bit << j);
+ }
+ }
+ c->val.raw[R_BYTES - 1] &= LAST_R_BYTE_MASK;
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul.c
new file mode 100644
index 0000000000..ae1d7a510a
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul.c
@@ -0,0 +1,113 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include <assert.h>
+
+#include "cleanup.h"
+#include "gf2x.h"
+#include "gf2x_internal.h"
+
+// The secure buffer size required for Karatsuba is computed by:
+// size(n) = 3*n/2 + size(n/2) = 3*sum_{i}{n/2^i} < 3n
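+// Unrolling the recursion: size(n) = 3*(n/2 + n/4 + n/8 + ...) < 3*n,
+// so 3 * R_PADDED_QWORDS qwords always suffice.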
+#define SECURE_BUFFER_QWORDS (3 * R_PADDED_QWORDS)
+
+// Karatsuba multiplication algorithm.
+// Input arguments a and b are padded with zeros, here:
+// - n: real number of digits in a and b (R_QWORDS)
+// - n_padded: padded number of digits of a and b (assumed to be power of 2)
+// A buffer sec_buf is used for storing temporary data between recursion calls.
+// It might contain secrets, and therefore should be securely cleaned after
+// completion.
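+// With X = x^(64*half_qw_len) and XOR as addition in GF(2)[x], the result is
+// assembled as a*b = lo + (lo + hi + alah*blbh)*X + hi*X^2, where
+// lo = a_lo*b_lo and hi = a_hi*b_hi; karatzuba_add1/2/3 implement the
+// required additions.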
+_INLINE_ void karatzuba(OUT uint64_t *c,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const size_t qwords_len,
+ IN const size_t qwords_len_pad,
+ uint64_t * sec_buf,
+ IN const gf2x_ctx *ctx)
+{
+ if(qwords_len <= ctx->mul_base_qwords) {
+ ctx->mul_base(c, a, b);
+ return;
+ }
+
+ const size_t half_qw_len = qwords_len_pad >> 1;
+
+ // Split a and b into low and high parts of size n_padded/2
+ const uint64_t *a_lo = a;
+ const uint64_t *b_lo = b;
+ const uint64_t *a_hi = &a[half_qw_len];
+ const uint64_t *b_hi = &b[half_qw_len];
+
+ // Split c into 4 parts of size n_padded/2 (the last ptr is not needed)
+ uint64_t *c0 = c;
+ uint64_t *c1 = &c[half_qw_len];
+ uint64_t *c2 = &c[half_qw_len * 2];
+
+ // Allocate 3 ptrs of size n_padded/2 on sec_buf
+ uint64_t *alah = sec_buf;
+ uint64_t *blbh = &sec_buf[half_qw_len];
+ uint64_t *tmp = &sec_buf[half_qw_len * 2];
+
+ // Move sec_buf ptr to the first free location for the next recursion call
+ sec_buf = &sec_buf[half_qw_len * 3];
+
+ // Compute a_lo*b_lo and store the result in (c1|c0)
+ karatzuba(c0, a_lo, b_lo, half_qw_len, half_qw_len, sec_buf, ctx);
+
+ // If the real number of digits n is less than or equal to n_padded/2 then:
+ // a_hi = 0 and b_hi = 0
+ // and
+ // (a_hi|a_lo)*(b_hi|b_lo) = a_lo*b_lo
+ // so we can skip the remaining two multiplications
+ if(qwords_len > half_qw_len) {
+ // Compute a_hi*b_hi and store the result in (c3|c2)
+ karatzuba(c2, a_hi, b_hi, qwords_len - half_qw_len, half_qw_len, sec_buf,
+ ctx);
+
+ // Compute alah = (a_lo + a_hi) and blbh = (b_lo + b_hi)
+ ctx->karatzuba_add1(alah, blbh, a, b, half_qw_len);
+
+ // Compute (c1 + c2) and store the result in tmp
+ ctx->karatzuba_add2(tmp, c1, c2, half_qw_len);
+
+ // Compute alah*blbh and store the result in (c2|c1)
+ karatzuba(c1, alah, blbh, half_qw_len, half_qw_len, sec_buf, ctx);
+
+ // Add (tmp|tmp) and (c3|c0) to (c2|c1)
+ ctx->karatzuba_add3(c0, tmp, half_qw_len);
+ }
+}
+
+void gf2x_mod_mul_with_ctx(OUT pad_r_t *c,
+ IN const pad_r_t *a,
+ IN const pad_r_t *b,
+ IN const gf2x_ctx *ctx)
+{
+ bike_static_assert((R_PADDED_BYTES % 2 == 0), karatzuba_n_is_odd);
+
+ DEFER_CLEANUP(dbl_pad_r_t t = {0}, dbl_pad_r_cleanup);
+ ALIGN(ALIGN_BYTES) uint64_t secure_buffer[SECURE_BUFFER_QWORDS];
+
+ karatzuba((uint64_t *)&t, (const uint64_t *)a, (const uint64_t *)b, R_QWORDS,
+ R_PADDED_QWORDS, secure_buffer, ctx);
+
+ ctx->red(c, &t);
+
+ secure_clean((uint8_t *)secure_buffer, sizeof(secure_buffer));
+}
+
+void gf2x_mod_mul(OUT pad_r_t *c, IN const pad_r_t *a, IN const pad_r_t *b)
+{
+ bike_static_assert((R_PADDED_BYTES % 2 == 0), karatzuba_n_is_odd);
+
+ // Initialize gf2x methods struct
+ gf2x_ctx ctx = {0};
+ gf2x_ctx_init(&ctx);
+
+ gf2x_mod_mul_with_ctx(c, a, b, &ctx);
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx2.c
new file mode 100644
index 0000000000..8f9c17dc09
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx2.c
@@ -0,0 +1,109 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#if defined(S2N_BIKE_R3_AVX2)
+
+#include <assert.h>
+
+#include "cleanup.h"
+#include "gf2x_internal.h"
+
+#define AVX2_INTERNAL
+#include "x86_64_intrinsic.h"
+
+void karatzuba_add1_avx2(OUT uint64_t *alah,
+ OUT uint64_t *blbh,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T va0, va1, vb0, vb1;
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ va0 = LOAD(&a[i]);
+ va1 = LOAD(&a[i + qwords_len]);
+ vb0 = LOAD(&b[i]);
+ vb1 = LOAD(&b[i + qwords_len]);
+
+ STORE(&alah[i], va0 ^ va1);
+ STORE(&blbh[i], vb0 ^ vb1);
+ }
+}
+
+void karatzuba_add2_avx2(OUT uint64_t *z,
+ IN const uint64_t *x,
+ IN const uint64_t *y,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T vx, vy;
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ vx = LOAD(&x[i]);
+ vy = LOAD(&y[i]);
+
+ STORE(&z[i], vx ^ vy);
+ }
+}
+
+void karatzuba_add3_avx2(OUT uint64_t *c,
+ IN const uint64_t *mid,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T vr0, vr1, vr2, vr3, vt;
+
+ uint64_t *c0 = c;
+ uint64_t *c1 = &c[qwords_len];
+ uint64_t *c2 = &c[2 * qwords_len];
+ uint64_t *c3 = &c[3 * qwords_len];
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ vr0 = LOAD(&c0[i]);
+ vr1 = LOAD(&c1[i]);
+ vr2 = LOAD(&c2[i]);
+ vr3 = LOAD(&c3[i]);
+ vt = LOAD(&mid[i]);
+
+ STORE(&c1[i], vt ^ vr0 ^ vr1);
+ STORE(&c2[i], vt ^ vr2 ^ vr3);
+ }
+}
+
+// c = a mod (x^r - 1)
+void gf2x_red_avx2(OUT pad_r_t *c, IN const dbl_pad_r_t *a)
+{
+ const uint64_t *a64 = (const uint64_t *)a;
+ uint64_t * c64 = (uint64_t *)c;
+
+ for(size_t i = 0; i < R_QWORDS; i += REG_QWORDS) {
+ REG_T vt0 = LOAD(&a64[i]);
+ REG_T vt1 = LOAD(&a64[i + R_QWORDS]);
+ REG_T vt2 = LOAD(&a64[i + R_QWORDS - 1]);
+
+ vt1 = SLLI_I64(vt1, LAST_R_QWORD_TRAIL);
+ vt2 = SRLI_I64(vt2, LAST_R_QWORD_LEAD);
+
+ vt0 ^= (vt1 | vt2);
+
+ STORE(&c64[i], vt0);
+ }
+
+ c64[R_QWORDS - 1] &= LAST_R_QWORD_MASK;
+
+ // Clean the secrets from the upper part of c
+ secure_clean((uint8_t *)&c64[R_QWORDS],
+ (R_PADDED_QWORDS - R_QWORDS) * sizeof(uint64_t));
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx512.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx512.c
new file mode 100644
index 0000000000..78ce9683ad
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_avx512.c
@@ -0,0 +1,109 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#if defined(S2N_BIKE_R3_AVX512)
+
+#include <assert.h>
+
+#include "cleanup.h"
+#include "gf2x_internal.h"
+
+#define AVX512_INTERNAL
+#include "x86_64_intrinsic.h"
+
+void karatzuba_add1_avx512(OUT uint64_t *alah,
+ OUT uint64_t *blbh,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T va0, va1, vb0, vb1;
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ va0 = LOAD(&a[i]);
+ va1 = LOAD(&a[i + qwords_len]);
+ vb0 = LOAD(&b[i]);
+ vb1 = LOAD(&b[i + qwords_len]);
+
+ STORE(&alah[i], va0 ^ va1);
+ STORE(&blbh[i], vb0 ^ vb1);
+ }
+}
+
+void karatzuba_add2_avx512(OUT uint64_t *z,
+ IN const uint64_t *x,
+ IN const uint64_t *y,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T vx, vy;
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ vx = LOAD(&x[i]);
+ vy = LOAD(&y[i]);
+
+ STORE(&z[i], vx ^ vy);
+ }
+}
+
+void karatzuba_add3_avx512(OUT uint64_t *c,
+ IN const uint64_t *mid,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T vr0, vr1, vr2, vr3, vt;
+
+ uint64_t *c0 = c;
+ uint64_t *c1 = &c[qwords_len];
+ uint64_t *c2 = &c[2 * qwords_len];
+ uint64_t *c3 = &c[3 * qwords_len];
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ vr0 = LOAD(&c0[i]);
+ vr1 = LOAD(&c1[i]);
+ vr2 = LOAD(&c2[i]);
+ vr3 = LOAD(&c3[i]);
+ vt = LOAD(&mid[i]);
+
+ STORE(&c1[i], vt ^ vr0 ^ vr1);
+ STORE(&c2[i], vt ^ vr2 ^ vr3);
+ }
+}
+
+// c = a mod (x^r - 1)
+void gf2x_red_avx512(OUT pad_r_t *c, IN const dbl_pad_r_t *a)
+{
+ const uint64_t *a64 = (const uint64_t *)a;
+ uint64_t * c64 = (uint64_t *)c;
+
+ for(size_t i = 0; i < R_QWORDS; i += REG_QWORDS) {
+ REG_T vt0 = LOAD(&a64[i]);
+ REG_T vt1 = LOAD(&a64[i + R_QWORDS]);
+ REG_T vt2 = LOAD(&a64[i + R_QWORDS - 1]);
+
+ vt1 = SLLI_I64(vt1, LAST_R_QWORD_TRAIL);
+ vt2 = SRLI_I64(vt2, LAST_R_QWORD_LEAD);
+
+ vt0 ^= (vt1 | vt2);
+
+ STORE(&c64[i], vt0);
+ }
+
+ c64[R_QWORDS - 1] &= LAST_R_QWORD_MASK;
+
+ // Clean the secrets from the upper part of c
+ secure_clean((uint8_t *)&c64[R_QWORDS],
+ (R_PADDED_QWORDS - R_QWORDS) * sizeof(uint64_t));
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_pclmul.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_pclmul.c
new file mode 100644
index 0000000000..1d4553997c
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_pclmul.c
@@ -0,0 +1,155 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#if defined(S2N_BIKE_R3_PCLMUL)
+
+#include <immintrin.h>
+
+#include "gf2x_internal.h"
+
+#define LOAD128(mem) _mm_loadu_si128((const void *)(mem))
+#define STORE128(mem, reg) _mm_storeu_si128((void *)(mem), (reg))
+#define UNPACKLO(x, y) _mm_unpacklo_epi64((x), (y))
+#define UNPACKHI(x, y) _mm_unpackhi_epi64((x), (y))
+#define CLMUL(x, y, imm) _mm_clmulepi64_si128((x), (y), (imm))
+#define BSRLI(x, imm) _mm_srli_si128((x), (imm))
+#define BSLLI(x, imm) _mm_slli_si128((x), (imm))
+
+// 4x4 Karatsuba multiplication
+_INLINE_ void gf2x_mul4_int(OUT __m128i c[4],
+ IN const __m128i a_lo,
+ IN const __m128i a_hi,
+ IN const __m128i b_lo,
+ IN const __m128i b_hi)
+{
+ // a_lo = [a1 | a0]; a_hi = [a3 | a2];
+ // b_lo = [b1 | b0]; b_hi = [b3 | b2];
+ // 4x4 Karatsuba requires three 2x2 multiplications:
+ // (1) a_lo * b_lo
+ // (2) a_hi * b_hi
+ // (3) aa * bb = (a_lo + a_hi) * (b_lo + b_hi)
+ // Each of the three 2x2 multiplications requires three 1x1 multiplications:
+ // (1) is computed by a0*b0, a1*b1, (a0+a1)*(b0+b1)
+ // (2) is computed by a2*b2, a3*b3, (a2+a3)*(b2+b3)
+ // (3) is computed by aa0*bb0, aa1*bb1, (aa0+aa1)*(bb0+bb1)
+ // All the required additions are performed in the end.
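+ // Note: CLMUL(x, y, 0x00) carry-lessly multiplies the low 64-bit halves of
+ // x and y, and CLMUL(x, y, 0x11) multiplies the high halves (the imm value
+ // selects the qword of each operand in _mm_clmulepi64_si128).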
+
+ __m128i aa, bb;
+ __m128i xx, yy, uu, vv, m;
+ __m128i lo[2], hi[2], mi[2];
+ __m128i t[9];
+
+ aa = a_lo ^ a_hi;
+ bb = b_lo ^ b_hi;
+
+ // xx <-- [(a2+a3) | (a0+a1)]
+ // yy <-- [(b2+b3) | (b0+b1)]
+ xx = UNPACKLO(a_lo, a_hi);
+ yy = UNPACKLO(b_lo, b_hi);
+ xx = xx ^ UNPACKHI(a_lo, a_hi);
+ yy = yy ^ UNPACKHI(b_lo, b_hi);
+
+ // uu <-- [ 0 | (aa0+aa1)]
+ // vv <-- [ 0 | (bb0+bb1)]
+ uu = aa ^ BSRLI(aa, 8);
+ vv = bb ^ BSRLI(bb, 8);
+
+ // 9 multiplications
+ t[0] = CLMUL(a_lo, b_lo, 0x00);
+ t[1] = CLMUL(a_lo, b_lo, 0x11);
+ t[2] = CLMUL(a_hi, b_hi, 0x00);
+ t[3] = CLMUL(a_hi, b_hi, 0x11);
+ t[4] = CLMUL(xx, yy, 0x00);
+ t[5] = CLMUL(xx, yy, 0x11);
+ t[6] = CLMUL(aa, bb, 0x00);
+ t[7] = CLMUL(aa, bb, 0x11);
+ t[8] = CLMUL(uu, vv, 0x00);
+
+ t[4] ^= (t[0] ^ t[1]);
+ t[5] ^= (t[2] ^ t[3]);
+ t[8] ^= (t[6] ^ t[7]);
+
+ lo[0] = t[0] ^ BSLLI(t[4], 8);
+ lo[1] = t[1] ^ BSRLI(t[4], 8);
+ hi[0] = t[2] ^ BSLLI(t[5], 8);
+ hi[1] = t[3] ^ BSRLI(t[5], 8);
+ mi[0] = t[6] ^ BSLLI(t[8], 8);
+ mi[1] = t[7] ^ BSRLI(t[8], 8);
+
+ m = lo[1] ^ hi[0];
+
+ c[0] = lo[0];
+ c[1] = lo[0] ^ mi[0] ^ m;
+ c[2] = hi[1] ^ mi[1] ^ m;
+ c[3] = hi[1];
+}
+
+// 512x512-bit multiplication performed by the Karatsuba algorithm,
+// where a and b are considered as having 8 digits of 64 bits each.
+void gf2x_mul_base_pclmul(OUT uint64_t *c,
+ IN const uint64_t *a,
+ IN const uint64_t *b)
+{
+ __m128i va[4], vb[4];
+ __m128i aa[2], bb[2];
+ __m128i lo[4], hi[4], mi[4], m[2];
+
+ for(size_t i = 0; i < 4; i++) {
+ va[i] = LOAD128(&a[QWORDS_IN_XMM * i]);
+ vb[i] = LOAD128(&b[QWORDS_IN_XMM * i]);
+ }
+
+ // Multiply the low and the high halves of a and b
+ // lo <-- a_lo * b_lo
+ // hi <-- a_hi * b_hi
+ gf2x_mul4_int(lo, va[0], va[1], vb[0], vb[1]);
+ gf2x_mul4_int(hi, va[2], va[3], vb[2], vb[3]);
+
+ // Compute the middle multiplication
+ // aa <-- a_lo + a_hi
+ // bb <-- b_lo + b_hi
+ // mi <-- aa * bb
+ aa[0] = va[0] ^ va[2];
+ aa[1] = va[1] ^ va[3];
+ bb[0] = vb[0] ^ vb[2];
+ bb[1] = vb[1] ^ vb[3];
+ gf2x_mul4_int(mi, aa[0], aa[1], bb[0], bb[1]);
+
+ m[0] = lo[2] ^ hi[0];
+ m[1] = lo[3] ^ hi[1];
+
+ STORE128(&c[0 * QWORDS_IN_XMM], lo[0]);
+ STORE128(&c[1 * QWORDS_IN_XMM], lo[1]);
+ STORE128(&c[2 * QWORDS_IN_XMM], mi[0] ^ lo[0] ^ m[0]);
+ STORE128(&c[3 * QWORDS_IN_XMM], mi[1] ^ lo[1] ^ m[1]);
+ STORE128(&c[4 * QWORDS_IN_XMM], mi[2] ^ hi[2] ^ m[0]);
+ STORE128(&c[5 * QWORDS_IN_XMM], mi[3] ^ hi[3] ^ m[1]);
+ STORE128(&c[6 * QWORDS_IN_XMM], hi[2]);
+ STORE128(&c[7 * QWORDS_IN_XMM], hi[3]);
+}
+
+void gf2x_sqr_pclmul(OUT dbl_pad_r_t *c, IN const pad_r_t *a)
+{
+ __m128i va, vr0, vr1;
+
+ const uint64_t *a64 = (const uint64_t *)a;
+ uint64_t * c64 = (uint64_t *)c;
+
+ for(size_t i = 0; i < (R_XMM * QWORDS_IN_XMM); i += QWORDS_IN_XMM) {
+ va = LOAD128(&a64[i]);
+
+ vr0 = CLMUL(va, va, 0x00);
+ vr1 = CLMUL(va, va, 0x11);
+
+ STORE128(&c64[i * 2], vr0);
+ STORE128(&c64[i * 2 + QWORDS_IN_XMM], vr1);
+ }
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_portable.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_portable.c
new file mode 100644
index 0000000000..86c21a1e28
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_portable.c
@@ -0,0 +1,77 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include "gf2x_internal.h"
+#include "utilities.h"
+
+#define LSB3(x) ((x)&7)
+
+// 64x64 bit multiplication
+// The algorithm is based on the windowing method, for example as in:
+// Brent, R. P., Gaudry, P., Thomé, E., & Zimmermann, P. (2008, May), "Faster
+// multiplication in GF (2)[x]". In: International Algorithmic Number Theory
+// Symposium (pp. 153-166). Springer, Berlin, Heidelberg. In this implementation,
+// the last three bits are multiplied using a schoolbook multiplication.
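+// For example, carry-less multiplication of 0b101 (x^2 + 1) by 0b011 (x + 1)
+// gives 0b1111 (x^3 + x^2 + x + 1): the shifted partial products are XOR-ed,
+// so no carries propagate between bit positions.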
+void gf2x_mul_base_port(OUT uint64_t *c,
+ IN const uint64_t *a,
+ IN const uint64_t *b)
+{
+ uint64_t h = 0, l = 0, g1, g2, u[8];
+ const uint64_t w = 64;
+ const uint64_t s = 3;
+ const uint64_t a0 = a[0];
+ const uint64_t b0 = b[0];
+
+ // Multiplying a 64-bit value by 7 can result in an overflow of 3 bits.
+ // Therefore, these bits are masked out, and are treated in step 3.
+ const uint64_t b0m = b0 & MASK(61);
+
+ // Step 1: Calculate a multiplication table with 8 entries.
+ u[0] = 0;
+ u[1] = b0m;
+ u[2] = u[1] << 1;
+ u[3] = u[2] ^ b0m;
+ u[4] = u[2] << 1;
+ u[5] = u[4] ^ b0m;
+ u[6] = u[3] << 1;
+ u[7] = u[6] ^ b0m;
+
+ // Step 2: Multiply two elements in parallel in positions i, i+s
+ l = u[LSB3(a0)] ^ (u[LSB3(a0 >> 3)] << 3);
+ h = (u[LSB3(a0 >> 3)] >> 61);
+
+ for(size_t i = (2 * s); i < w; i += (2 * s)) {
+ const size_t i2 = (i + s);
+
+ g1 = u[LSB3(a0 >> i)];
+ g2 = u[LSB3(a0 >> i2)];
+
+ l ^= (g1 << i) ^ (g2 << i2);
+ h ^= (g1 >> (w - i)) ^ (g2 >> (w - i2));
+ }
+
+ // Step 3: Multiply the last three bits.
+ for(size_t i = 61; i < 64; i++) {
+ uint64_t mask = (-((b0 >> i) & 1));
+ l ^= ((a0 << i) & mask);
+ h ^= ((a0 >> (w - i)) & mask);
+ }
+
+ c[0] = l;
+ c[1] = h;
+}
+
+// c = a^2
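+// Squaring in GF(2)[x] is the Frobenius map: (sum a_i*x^i)^2 = sum a_i*x^(2i),
+// e.g. (x^3 + x + 1)^2 = x^6 + x^2 + 1, so each 64-bit word of a expands into
+// two 64-bit words of c.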
+void gf2x_sqr_port(OUT dbl_pad_r_t *c, IN const pad_r_t *a)
+{
+ const uint64_t *a64 = (const uint64_t *)a;
+ uint64_t * c64 = (uint64_t *)c;
+
+ for(size_t i = 0; i < R_QWORDS; i++) {
+ gf2x_mul_base_port(&c64[2 * i], &a64[i], &a64[i]);
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c
new file mode 100644
index 0000000000..c321bf355f
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_base_vpclmul.c
@@ -0,0 +1,135 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#if defined(S2N_BIKE_R3_VPCLMUL)
+
+#include "gf2x_internal.h"
+
+#define AVX512_INTERNAL
+#include "x86_64_intrinsic.h"
+
+#define CLMUL(x, y, imm) _mm512_clmulepi64_epi128((x), (y), (imm))
+
+_INLINE_ void
+mul2_512(OUT __m512i *h, OUT __m512i *l, IN const __m512i a, IN const __m512i b)
+{
+ const __m512i mask_abq = SET_I64(6, 7, 4, 5, 2, 3, 0, 1);
+ const __m512i s1 = a ^ PERMX_I64(a, _MM_SHUFFLE(2, 3, 0, 1));
+ const __m512i s2 = b ^ PERMX_I64(b, _MM_SHUFFLE(2, 3, 0, 1));
+
+ __m512i lq = CLMUL(a, b, 0x00);
+ __m512i hq = CLMUL(a, b, 0x11);
+ __m512i abq = lq ^ hq ^ CLMUL(s1, s2, 0x00);
+ abq = PERMXVAR_I64(mask_abq, abq);
+ *l = MXOR_I64(lq, 0xaa, lq, abq);
+ *h = MXOR_I64(hq, 0x55, hq, abq);
+}
+
+// 8x8 Karatsuba multiplication
+_INLINE_ void gf2x_mul8_512_int(OUT __m512i *zh,
+ OUT __m512i * zl,
+ IN const __m512i a,
+ IN const __m512i b)
+{
+ const __m512i mask0 = SET_I64(13, 12, 5, 4, 9, 8, 1, 0);
+ const __m512i mask1 = SET_I64(15, 14, 7, 6, 11, 10, 3, 2);
+ const __m512i mask2 = SET_I64(3, 2, 1, 0, 7, 6, 5, 4);
+ const __m512i mask3 = SET_I64(11, 10, 9, 8, 3, 2, 1, 0);
+ const __m512i mask4 = SET_I64(15, 14, 13, 12, 7, 6, 5, 4);
+ const __m512i mask_s1 = SET_I64(7, 6, 5, 4, 1, 0, 3, 2);
+ const __m512i mask_s2 = SET_I64(3, 2, 7, 6, 5, 4, 1, 0);
+
+ __m512i xl, xh, xabl, xabh, xab, xab1, xab2;
+ __m512i yl, yh, yabl, yabh, yab;
+ __m512i t[4];
+
+ // Calculate:
+ // AX1^AX3|| AX2^AX3 || AX0^AX2 || AX0^AX1
+ // BX1^BX3|| BX2^BX3 || BX0^BX2 || BX0^BX1
+ // Where (AX1^AX3 || AX0^AX2) stands for (AX1 || AX0)^(AX3 || AX2) = AY0^AY1
+ t[0] = PERMXVAR_I64(mask_s1, a) ^ PERMXVAR_I64(mask_s2, a);
+ t[1] = PERMXVAR_I64(mask_s1, b) ^ PERMXVAR_I64(mask_s2, b);
+
+ // Calculate:
+ // Don't care || AX1^AX3^AX0^AX2
+ // Don't care || BX1^BX3^BX0^BX2
+ t[2] = t[0] ^ VALIGN(t[0], t[0], 4);
+ t[3] = t[1] ^ VALIGN(t[1], t[1], 4);
+
+ mul2_512(&xh, &xl, a, b);
+ mul2_512(&xabh, &xabl, t[0], t[1]);
+ mul2_512(&yabh, &yabl, t[2], t[3]);
+
+ xab = xl ^ xh ^ PERMX2VAR_I64(xabl, mask0, xabh);
+ yl = PERMX2VAR_I64(xl, mask3, xh);
+ yh = PERMX2VAR_I64(xl, mask4, xh);
+ xab1 = VALIGN(xab, xab, 6);
+ xab2 = VALIGN(xab, xab, 2);
+ yl = MXOR_I64(yl, 0x3c, yl, xab1);
+ yh = MXOR_I64(yh, 0x3c, yh, xab2);
+
+ __m512i oxh = PERMX2VAR_I64(xabl, mask1, xabh);
+ __m512i oxl = VALIGN(oxh, oxh, 4);
+ yab = oxl ^ oxh ^ PERMX2VAR_I64(yabl, mask0, yabh);
+ yab = MXOR_I64(oxh, 0x3c, oxh, VALIGN(yab, yab, 2));
+ yab ^= yl ^ yh;
+
+ // Z0 (yl) + Z1 (yab) + Z2 (yh)
+ yab = PERMXVAR_I64(mask2, yab);
+ *zl = MXOR_I64(yl, 0xf0, yl, yab);
+ *zh = MXOR_I64(yh, 0x0f, yh, yab);
+}
+
+// 1024x1024-bit multiplication performed by the Karatsuba algorithm.
+// Here, a and b are considered as having 16 digits of 64 bits each.
+void gf2x_mul_base_vpclmul(OUT uint64_t *c,
+ IN const uint64_t *a,
+ IN const uint64_t *b)
+{
+ const __m512i a0 = LOAD(a);
+ const __m512i a1 = LOAD(&a[QWORDS_IN_ZMM]);
+ const __m512i b0 = LOAD(b);
+ const __m512i b1 = LOAD(&b[QWORDS_IN_ZMM]);
+
+ __m512i hi[2], lo[2], mi[2];
+
+ gf2x_mul8_512_int(&lo[1], &lo[0], a0, b0);
+ gf2x_mul8_512_int(&hi[1], &hi[0], a1, b1);
+ gf2x_mul8_512_int(&mi[1], &mi[0], a0 ^ a1, b0 ^ b1);
+
+ __m512i m = lo[1] ^ hi[0];
+
+ STORE(&c[0 * QWORDS_IN_ZMM], lo[0]);
+ STORE(&c[1 * QWORDS_IN_ZMM], mi[0] ^ lo[0] ^ m);
+ STORE(&c[2 * QWORDS_IN_ZMM], mi[1] ^ hi[1] ^ m);
+ STORE(&c[3 * QWORDS_IN_ZMM], hi[1]);
+}
+
+void gf2x_sqr_vpclmul(OUT dbl_pad_r_t *c, IN const pad_r_t *a)
+{
+ __m512i va, vm, vr0, vr1;
+
+ const uint64_t *a64 = (const uint64_t *)a;
+ uint64_t * c64 = (uint64_t *)c;
+
+ vm = SET_I64(7, 3, 6, 2, 5, 1, 4, 0);
+
+ for(size_t i = 0; i < (R_ZMM * QWORDS_IN_ZMM); i += QWORDS_IN_ZMM) {
+ va = LOAD(&a64[i]);
+ va = PERMXVAR_I64(vm, va);
+
+ vr0 = CLMUL(va, va, 0x00);
+ vr1 = CLMUL(va, va, 0x11);
+
+ STORE(&c64[i * 2], vr0);
+ STORE(&c64[i * 2 + QWORDS_IN_ZMM], vr1);
+ }
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_portable.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_portable.c
new file mode 100644
index 0000000000..187042d44c
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/gf2x_mul_portable.c
@@ -0,0 +1,103 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include <assert.h>
+
+#include "cleanup.h"
+#include "gf2x_internal.h"
+
+#define PORTABLE_INTERNAL
+#include "x86_64_intrinsic.h"
+
+void karatzuba_add1_port(OUT uint64_t *alah,
+ OUT uint64_t *blbh,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T va0, va1, vb0, vb1;
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ va0 = LOAD(&a[i]);
+ va1 = LOAD(&a[i + qwords_len]);
+ vb0 = LOAD(&b[i]);
+ vb1 = LOAD(&b[i + qwords_len]);
+
+ STORE(&alah[i], va0 ^ va1);
+ STORE(&blbh[i], vb0 ^ vb1);
+ }
+}
+
+void karatzuba_add2_port(OUT uint64_t *z,
+ IN const uint64_t *x,
+ IN const uint64_t *y,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T vx, vy;
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ vx = LOAD(&x[i]);
+ vy = LOAD(&y[i]);
+
+ STORE(&z[i], vx ^ vy);
+ }
+}
+
+void karatzuba_add3_port(OUT uint64_t *c,
+ IN const uint64_t *mid,
+ IN const size_t qwords_len)
+{
+ assert(qwords_len % REG_QWORDS == 0);
+
+ REG_T vr0, vr1, vr2, vr3, vt;
+
+ uint64_t *c0 = c;
+ uint64_t *c1 = &c[qwords_len];
+ uint64_t *c2 = &c[2 * qwords_len];
+ uint64_t *c3 = &c[3 * qwords_len];
+
+ for(size_t i = 0; i < qwords_len; i += REG_QWORDS) {
+ vr0 = LOAD(&c0[i]);
+ vr1 = LOAD(&c1[i]);
+ vr2 = LOAD(&c2[i]);
+ vr3 = LOAD(&c3[i]);
+ vt = LOAD(&mid[i]);
+
+ STORE(&c1[i], vt ^ vr0 ^ vr1);
+ STORE(&c2[i], vt ^ vr2 ^ vr3);
+ }
+}
+
+// c = a mod (x^r - 1)
+void gf2x_red_port(OUT pad_r_t *c, IN const dbl_pad_r_t *a)
+{
+ const uint64_t *a64 = (const uint64_t *)a;
+ uint64_t * c64 = (uint64_t *)c;
+
+ for(size_t i = 0; i < R_QWORDS; i += REG_QWORDS) {
+ REG_T vt0 = LOAD(&a64[i]);
+ REG_T vt1 = LOAD(&a64[i + R_QWORDS]);
+ REG_T vt2 = LOAD(&a64[i + R_QWORDS - 1]);
+
+ vt1 = SLLI_I64(vt1, LAST_R_QWORD_TRAIL);
+ vt2 = SRLI_I64(vt2, LAST_R_QWORD_LEAD);
+
+ vt0 ^= (vt1 | vt2);
+
+ STORE(&c64[i], vt0);
+ }
+
+ c64[R_QWORDS - 1] &= LAST_R_QWORD_MASK;
+
+ // Clean the secrets from the upper part of c
+ secure_clean((uint8_t *)&c64[R_QWORDS],
+ (R_PADDED_QWORDS - R_QWORDS) * sizeof(uint64_t));
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.c
new file mode 100644
index 0000000000..a76a31ef87
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.c
@@ -0,0 +1,170 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include <assert.h>
+
+#include "sampling.h"
+#include "sampling_internal.h"
+
+// The SIMD implementation of the is_new function requires the size of wlist
+// to be a multiple of the number of DWORDS in a SIMD register (REG_DWORDS).
+// The function is used for generating both DV and T1 random numbers, so we
+// define two separate macros.
+#define AVX512_REG_DWORDS (16)
+#define WLIST_SIZE_ADJUSTED_D \
+ (AVX512_REG_DWORDS * DIVIDE_AND_CEIL(DV, AVX512_REG_DWORDS))
+#define WLIST_SIZE_ADJUSTED_T \
+ (AVX512_REG_DWORDS * DIVIDE_AND_CEIL(T1, AVX512_REG_DWORDS))
+
+// Returns the number of bits needed to represent val, i.e.,
+// floor(log2(val)) + 1 for val > 0 (and 0 for val = 0).
+_INLINE_ uint8_t bit_scan_reverse_vartime(IN uint64_t val)
+{
+ // index is always smaller than 64
+ uint8_t index = 0;
+
+ while(val != 0) {
+ val >>= 1;
+ index++;
+ }
+
+ return index;
+}
+
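+// Rejection sampling: the PRF output is masked to the bit length of len and
+// the draw is repeated until the value falls below len. For example, for
+// len = R_BITS = 12323 (Level 1) the mask keeps 14 bits (values 0..16383),
+// so roughly 75% of the candidates are accepted.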
+_INLINE_ ret_t get_rand_mod_len(OUT uint32_t * rand_pos,
+ IN const uint32_t len,
+ IN OUT aes_ctr_prf_state_t *prf_state)
+{
+ const uint64_t mask = MASK(bit_scan_reverse_vartime(len));
+
+ do {
+ // Generate a 32-bit (pseudo) random value.
+ // This can be optimized to take only 16 bits.
+ POSIX_GUARD(aes_ctr_prf((uint8_t *)rand_pos, prf_state, sizeof(*rand_pos)));
+
+ // Mask relevant bits only
+ (*rand_pos) &= mask;
+
+ // Break if a number that is smaller than len is found
+ if((*rand_pos) < len) {
+ break;
+ }
+
+ } while(1 == 1);
+
+ return SUCCESS;
+}
+
+_INLINE_ void make_odd_weight(IN OUT r_t *r)
+{
+ if(((r_bits_vector_weight(r) % 2) == 1)) {
+ // Already odd
+ return;
+ }
+
+ r->raw[0] ^= 1;
+}
+
+// Returns an array of r pseudorandom bits.
+// No restrictions exist for the top or bottom bits.
+// If an odd weight of r is required, set must_be_odd = 1.
+// The function uses the provided prf context.
+ret_t sample_uniform_r_bits_with_fixed_prf_context(
+ OUT r_t *r,
+ IN OUT aes_ctr_prf_state_t *prf_state,
+ IN const must_be_odd_t must_be_odd)
+{
+ // Generate random data
+ POSIX_GUARD(aes_ctr_prf(r->raw, prf_state, R_BYTES));
+
+ // Mask upper bits of the MSByte
+ r->raw[R_BYTES - 1] &= MASK(R_BITS + 8 - (R_BYTES * 8));
+
+ if(must_be_odd == MUST_BE_ODD) {
+ make_odd_weight(r);
+ }
+
+ return SUCCESS;
+}
+
+ret_t generate_indices_mod_z(OUT idx_t * out,
+ IN const size_t num_indices,
+ IN const size_t z,
+ IN OUT aes_ctr_prf_state_t *prf_state,
+ IN const sampling_ctx *ctx)
+{
+ size_t ctr = 0;
+
+ // Generate num_indices unique (pseudo) random numbers modulo z
+ do {
+ POSIX_GUARD(get_rand_mod_len(&out[ctr], z, prf_state));
+ ctr += ctx->is_new(out, ctr);
+ } while(ctr < num_indices);
+
+ return SUCCESS;
+}
+
+// Returns an array of r pseudorandom bits.
+// No restrictions exist for the top or bottom bits.
+// If an odd weight of r is required, set must_be_odd = MUST_BE_ODD.
+ret_t sample_uniform_r_bits(OUT r_t *r,
+ IN const seed_t * seed,
+ IN const must_be_odd_t must_be_odd)
+{
+ // For the seedexpander
+ DEFER_CLEANUP(aes_ctr_prf_state_t prf_state = {0}, aes_ctr_prf_state_cleanup);
+
+ POSIX_GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, seed));
+
+ POSIX_GUARD(sample_uniform_r_bits_with_fixed_prf_context(r, &prf_state, must_be_odd));
+
+ return SUCCESS;
+}
+
+ret_t generate_sparse_rep(OUT pad_r_t *r,
+ OUT idx_t *wlist,
+ IN OUT aes_ctr_prf_state_t *prf_state)
+{
+
+ // Initialize the sampling context
+ sampling_ctx ctx;
+ sampling_ctx_init(&ctx);
+
+ idx_t wlist_temp[WLIST_SIZE_ADJUSTED_D] = {0};
+
+ POSIX_GUARD(generate_indices_mod_z(wlist_temp, DV, R_BITS, prf_state, &ctx));
+
+ bike_memcpy(wlist, wlist_temp, DV * sizeof(idx_t));
+ ctx.secure_set_bits(r, 0, wlist, DV);
+
+ return SUCCESS;
+}
+
+ret_t generate_error_vector(OUT pad_e_t *e, IN const seed_t *seed)
+{
+ DEFER_CLEANUP(aes_ctr_prf_state_t prf_state = {0}, aes_ctr_prf_state_cleanup);
+
+ POSIX_GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, seed));
+
+ // Initialize the sampling context
+ sampling_ctx ctx;
+ sampling_ctx_init(&ctx);
+
+ idx_t wlist[WLIST_SIZE_ADJUSTED_T] = {0};
+ POSIX_GUARD(generate_indices_mod_z(wlist, T1, N_BITS, &prf_state, &ctx));
+
+ // (e0, e1) hold bits 0..R_BITS-1 and R_BITS..2*R_BITS-1 of the error, resp.
+ ctx.secure_set_bits(&e->val[0], 0, wlist, T1);
+ ctx.secure_set_bits(&e->val[1], R_BITS, wlist, T1);
+
+ // Clean the padding of the elements
+ PE0_RAW(e)[R_BYTES - 1] &= LAST_R_BYTE_MASK;
+ PE1_RAW(e)[R_BYTES - 1] &= LAST_R_BYTE_MASK;
+ bike_memset(&PE0_RAW(e)[R_BYTES], 0, R_PADDED_BYTES - R_BYTES);
+ bike_memset(&PE1_RAW(e)[R_BYTES], 0, R_PADDED_BYTES - R_BYTES);
+
+ return SUCCESS;
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.h
new file mode 100644
index 0000000000..a9d50c9bc2
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling.h
@@ -0,0 +1,40 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include <stdlib.h>
+#include "aes_ctr_prf.h"
+#include "pq-crypto/s2n_pq_random.h"
+#include "utils/s2n_result.h"
+#include "utilities.h"
+
+typedef enum
+{
+ NO_RESTRICTION = 0,
+ MUST_BE_ODD = 1
+} must_be_odd_t;
+
+_INLINE_ ret_t get_seeds(OUT seeds_t *seeds) {
+ if(s2n_result_is_ok(s2n_get_random_bytes(seeds->seed[0].raw, sizeof(seeds_t)))) {
+ return SUCCESS;
+ } else {
+ BIKE_ERROR(E_FAIL_TO_GET_SEED);
+ }
+}
+
+// Returns an array of r pseudorandom bits. If an odd
+// weight of r is required, set must_be_odd to MUST_BE_ODD.
+ret_t sample_uniform_r_bits(OUT r_t *r,
+ IN const seed_t *seed,
+ IN must_be_odd_t must_be_odd);
+
+ret_t generate_sparse_rep(OUT pad_r_t *r,
+ OUT idx_t *wlist,
+ IN OUT aes_ctr_prf_state_t *prf_state);
+
+ret_t generate_error_vector(OUT pad_e_t *e, IN const seed_t *seed);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx2.c
new file mode 100644
index 0000000000..c23be2e86e
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx2.c
@@ -0,0 +1,123 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#if defined(S2N_BIKE_R3_AVX2)
+
+#include <assert.h>
+
+#include "sampling_internal.h"
+
+#define AVX2_INTERNAL
+#include "x86_64_intrinsic.h"
+
+// For improved performance, we process NUM_YMMS registers' worth of data in parallel.
+#define NUM_YMMS (4)
+#define YMMS_QWORDS (QWORDS_IN_YMM * NUM_YMMS)
+
+void secure_set_bits_avx2(OUT pad_r_t * r,
+ IN const size_t first_pos,
+ IN const idx_t *wlist,
+ IN const size_t w_size)
+{
+ // The function assumes that the size of r is a multiple
+ // of the cumulative size of used YMM registers.
+ assert((sizeof(*r) / sizeof(uint64_t)) % YMMS_QWORDS == 0);
+
+ // va vectors hold the bits of the output array "r"
+ // va_pos_qw vectors hold the qw position indices of "r"
+ // The algorithm works as follows:
+ // 1. Initialize va_pos_qw with starting positions of qw's of "r"
+ // va_pos_qw = (3, 2, 1, 0);
+ // 2. While the size of "r" is not exceeded:
+ // 3. For each w in wlist:
+ // 4. Compare the pos_qw of w with positions in va_pos_qw
+ // and for the position which is equal set the appropriate
+ // bit in va vector.
+ // 5. Set va_pos_qw to the next qw positions of "r"
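+ // For example, if wlist holds the single value 70 and first_pos = 0, then
+ // w_pos_qw = 1 and w_pos_bit = 1 << 6, so only the lane whose va_pos_qw
+ // entry equals 1 receives the bit, i.e. bit 70 of "r" is set.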
+ __m256i va[NUM_YMMS], va_pos_qw[NUM_YMMS], va_mask;
+ __m256i w_pos_qw, w_pos_bit;
+ __m256i one, inc;
+
+ uint64_t *r64 = (uint64_t *)r;
+
+ one = SET1_I64(1);
+ inc = SET1_I64(QWORDS_IN_YMM);
+
+ // 1. Initialize
+ va_pos_qw[0] = SET_I64(3, 2, 1, 0);
+ for(size_t i = 1; i < NUM_YMMS; i++) {
+ va_pos_qw[i] = ADD_I64(va_pos_qw[i - 1], inc);
+ }
+
+ // va_pos_qw vectors hold qw positions 0 .. (NUM_YMMS * QWORDS_IN_YMM - 1)
+ // Therefore, we set the increment vector inc such that by adding it to
+ // va_pos_qw vectors, they hold the next YMMS_QWORDS qw positions.
+ inc = SET1_I64(YMMS_QWORDS);
+
+ for(size_t i = 0; i < (sizeof(*r) / sizeof(uint64_t)); i += YMMS_QWORDS) {
+ for(size_t va_iter = 0; va_iter < NUM_YMMS; va_iter++) {
+ va[va_iter] = SET_ZERO;
+ }
+
+ for(size_t w_iter = 0; w_iter < w_size; w_iter++) {
+ int32_t w = wlist[w_iter] - first_pos;
+ w_pos_qw = SET1_I64(w >> 6);
+ w_pos_bit = SLLI_I64(one, w & MASK(6));
+
+ // 4. Compare the positions in va_pos_qw with w_pos_qw
+ // and set the appropriate bit in va
+ for(size_t va_iter = 0; va_iter < NUM_YMMS; va_iter++) {
+ va_mask = CMPEQ_I64(va_pos_qw[va_iter], w_pos_qw);
+ va[va_iter] |= (va_mask & w_pos_bit);
+ }
+ }
+
+ // 5. Set the va_pos_qw to the next qw positions of r
+ // and store the previously computed data in r
+ for(size_t va_iter = 0; va_iter < NUM_YMMS; va_iter++) {
+ STORE(&r64[i + (va_iter * QWORDS_IN_YMM)], va[va_iter]);
+ va_pos_qw[va_iter] = ADD_I64(va_pos_qw[va_iter], inc);
+ }
+ }
+}
+
+int is_new_avx2(IN const idx_t *wlist, IN const size_t ctr)
+{
+ bike_static_assert((sizeof(idx_t) == sizeof(uint32_t)), idx_t_is_not_uint32_t);
+
+ REG_T idx_ctr = SET1_I32(wlist[ctr]);
+
+ for(size_t i = 0; i < ctr; i += REG_DWORDS) {
+ // Comparisons are done with SIMD instructions with each SIMD register
+ // containing REG_DWORDS elements. We compare registers element-wise:
+ // idx_ctr = {8 repetitions of wlist[ctr]}, with
+ // idx_cur = {8 consecutive elements from wlist}.
+ // In the last iteration we consider wlist elements only up to ctr.
+
+ REG_T idx_cur = LOAD(&wlist[i]);
+ REG_T cmp_res = CMPEQ_I32(idx_ctr, idx_cur);
+ uint32_t check = MOVEMASK(cmp_res);
+
+ // Handle the last iteration by appropriate masking.
+ if(ctr < (i + REG_DWORDS)) {
+ // MOVEMASK instruction in AVX2 compares corresponding bytes from
+ // two given vector registers and produces a 32-bit mask. On the other hand,
+ // we compare idx_t elements, not bytes, so we multiply by sizeof(idx_t).
+ check &= MASK((ctr - i) * sizeof(idx_t));
+ }
+
+ if(check != 0) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx512.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx512.c
new file mode 100644
index 0000000000..6cab4cffea
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_avx512.c
@@ -0,0 +1,123 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#if defined(S2N_BIKE_R3_AVX512)
+
+#include <assert.h>
+
+#include "sampling_internal.h"
+
+#define AVX512_INTERNAL
+#include "x86_64_intrinsic.h"
+
+// For improved performance, we process NUM_ZMMS registers' worth of data in parallel.
+#define NUM_ZMMS (8)
+#define ZMMS_QWORDS (QWORDS_IN_ZMM * NUM_ZMMS)
+
+void secure_set_bits_avx512(OUT pad_r_t * r,
+ IN const size_t first_pos,
+ IN const idx_t *wlist,
+ IN const size_t w_size)
+{
+ // The function assumes that the size of r is a multiple
+ // of the cumulative size of used ZMM registers.
+ assert((sizeof(*r) / sizeof(uint64_t)) % ZMMS_QWORDS == 0);
+
+ // va vectors hold the bits of the output array "r"
+ // va_pos_qw vectors hold the qw position indices of "r"
+ // The algorithm works as follows:
+ // 1. Initialize va_pos_qw with starting positions of qw's of "r"
+ // va_pos_qw = (7, 6, 5, 4, 3, 2, 1, 0);
+ // 2. While the size of "r" is not exceeded:
+ // 3. For each w in wlist:
+ // 4. Compare the pos_qw of w with positions in va_pos_qw
+ // and for the position which is equal set the appropriate
+ // bit in va vector.
+ // 5. Set va_pos_qw to the next qw positions of "r"
+ __m512i va[NUM_ZMMS], va_pos_qw[NUM_ZMMS];
+ __m512i w_pos_qw, w_pos_bit, one, inc;
+ __mmask8 va_mask;
+
+ uint64_t *r64 = (uint64_t *)r;
+
+ one = SET1_I64(1);
+ inc = SET1_I64(QWORDS_IN_ZMM);
+
+ // 1. Initialize
+ va_pos_qw[0] = SET_I64(7, 6, 5, 4, 3, 2, 1, 0);
+ for(size_t i = 1; i < NUM_ZMMS; i++) {
+ va_pos_qw[i] = ADD_I64(va_pos_qw[i - 1], inc);
+ }
+
+ // va_pos_qw vectors hold qw positions 0 .. (NUM_ZMMS * QWORDS_IN_ZMM - 1)
+ // Therefore, we set the increment vector inc such that by adding it to
+ // va_pos_qw vectors they hold the next ZMMS_QWORDS qw positions.
+ inc = SET1_I64(ZMMS_QWORDS);
+
+ for(size_t i = 0; i < (sizeof(*r) / sizeof(uint64_t)); i += ZMMS_QWORDS) {
+ for(size_t va_iter = 0; va_iter < NUM_ZMMS; va_iter++) {
+ va[va_iter] = SET_ZERO;
+ }
+
+ for(size_t w_iter = 0; w_iter < w_size; w_iter++) {
+ int32_t w = wlist[w_iter] - first_pos;
+ w_pos_qw = SET1_I64(w >> 6);
+#if (defined(__GNUC__) && ((__GNUC__ == 6) || (__GNUC__ == 5)) && !defined(__clang__)) || (defined(__clang__) && __clang_major__ == 3 && __clang_minor__ == 9)
+ // Workaround for gcc-6, gcc-5, and clang < 3.9, which do not allow the second
+ // argument of SLLI to be a non-immediate value.
+ __m512i temp = SET1_I64(w & MASK(6));
+ w_pos_bit = SLLV_I64(one, temp);
+#else
+ w_pos_bit = SLLI_I64(one, w & MASK(6));
+#endif
+
+ // 4. Compare the positions in va_pos_qw with w_pos_qw
+ // and set the appropriate bit in va
+ for(size_t va_iter = 0; va_iter < NUM_ZMMS; va_iter++) {
+ va_mask = CMPMEQ_I64(va_pos_qw[va_iter], w_pos_qw);
+ va[va_iter] = MOR_I64(va[va_iter], va_mask, va[va_iter], w_pos_bit);
+ }
+ }
+
+ // 5. Set the va_pos_qw to the next qw positions of r
+ // and store the previously computed data in r
+ for(size_t va_iter = 0; va_iter < NUM_ZMMS; va_iter++) {
+ STORE(&r64[i + (va_iter * QWORDS_IN_ZMM)], va[va_iter]);
+ va_pos_qw[va_iter] = ADD_I64(va_pos_qw[va_iter], inc);
+ }
+ }
+}
+
+int is_new_avx512(IN const idx_t *wlist, IN const size_t ctr)
+{
+ bike_static_assert((sizeof(idx_t) == sizeof(uint32_t)), idx_t_is_not_uint32_t);
+
+ REG_T idx_ctr = SET1_I32(wlist[ctr]);
+
+ for(size_t i = 0; i < ctr; i += REG_DWORDS) {
+ // Comparisons are done with SIMD instructions with each SIMD register
+ // containing REG_DWORDS elements. We compare registers element-wise:
+ // idx_ctr = {8 repetitions of wlist[ctr]}, with
+ // idx_cur = {8 consecutive elements from wlist}.
+ // In the last iteration we consider wlist elements only up to ctr.
+
+ REG_T idx_cur = LOAD(&wlist[i]);
+
+ uint16_t mask = (ctr < (i + REG_DWORDS)) ? MASK(ctr - i) : 0xffff;
+ uint16_t check = MCMPMEQ_I32(mask, idx_ctr, idx_cur);
+
+ if(check != 0) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+#endif
+
+typedef int dummy_typedef_to_avoid_empty_translation_unit_warning;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_internal.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_internal.h
new file mode 100644
index 0000000000..3fd68354f2
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_internal.h
@@ -0,0 +1,66 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "pq-crypto/s2n_pq.h"
+#include "defs.h"
+#include "types.h"
+
+void secure_set_bits_port(OUT pad_r_t *r,
+ IN size_t first_pos,
+ IN const idx_t *wlist,
+ IN size_t w_size);
+
+// Compares wlist[ctr] to w[i] for all i < ctr.
+// Returns 0 if wlist[ctr] is contained in wlist, returns 1 otherwise.
+int is_new_port(IN const idx_t *wlist, IN const size_t ctr);
+
+#if defined(S2N_BIKE_R3_AVX2)
+void secure_set_bits_avx2(OUT pad_r_t *r,
+ IN size_t first_pos,
+ IN const idx_t *wlist,
+ IN size_t w_size);
+
+int is_new_avx2(IN const idx_t *wlist, IN const size_t ctr);
+#endif
+
+#if defined(S2N_BIKE_R3_AVX512)
+void secure_set_bits_avx512(OUT pad_r_t *r,
+ IN size_t first_pos,
+ IN const idx_t *wlist,
+ IN size_t w_size);
+int is_new_avx512(IN const idx_t *wlist, IN const size_t ctr);
+#endif
+
+typedef struct sampling_ctx_st {
+ void (*secure_set_bits)(OUT pad_r_t *r,
+ IN size_t first_pos,
+ IN const idx_t *wlist,
+ IN size_t w_size);
+ int (*is_new)(IN const idx_t *wlist, IN const size_t ctr);
+} sampling_ctx;
+
+_INLINE_ void sampling_ctx_init(sampling_ctx *ctx)
+{
+#if defined(S2N_BIKE_R3_AVX512)
+ if(s2n_bike_r3_is_avx512_enabled()) {
+ ctx->secure_set_bits = secure_set_bits_avx512;
+ ctx->is_new = is_new_avx512;
+ } else
+#endif
+#if defined(S2N_BIKE_R3_AVX2)
+ if(s2n_bike_r3_is_avx2_enabled()) {
+ ctx->secure_set_bits = secure_set_bits_avx2;
+ ctx->is_new = is_new_avx2;
+ } else
+#endif
+ {
+ ctx->secure_set_bits = secure_set_bits_port;
+ ctx->is_new = is_new_port;
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_portable.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_portable.c
new file mode 100644
index 0000000000..b670730f0a
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sampling_portable.c
@@ -0,0 +1,60 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include <assert.h>
+
+#include "sampling_internal.h"
+#include "utilities.h"
+
+#define MAX_WLIST_SIZE (T1 > DV ? T1 : DV)
+
+void secure_set_bits_port(OUT pad_r_t * r,
+ IN const size_t first_pos,
+ IN const idx_t *wlist,
+ IN const size_t w_size)
+{
+ assert(w_size <= MAX_WLIST_SIZE);
+
+ // Ideally we would cast r->val directly, but it is not guaranteed to share the
+ // alignment of the enclosing pad_r_t structure. Instead, we assert that val sits
+ // at offset 0 of r and cast r itself.
+ bike_static_assert(offsetof(pad_r_t, val) == 0, val_wrong_pos_in_pad_r_t);
+ uint64_t *a64 = (uint64_t *)r;
+ uint64_t val, mask;
+
+ // The size of wlist can be either DV or T1, so we size the buffers to max(DV, T1).
+ size_t pos_qw[MAX_WLIST_SIZE];
+ size_t pos_bit[MAX_WLIST_SIZE];
+
+ // Identify the QW position of every value, and the bit position inside this QW.
+ for(size_t i = 0; i < w_size; i++) {
+ int32_t w = wlist[i] - first_pos;
+ pos_qw[i] = w >> 6;
+ pos_bit[i] = BIT(w & MASK(6));
+ }
+
+ // Fill each QW in constant time
+ for(size_t i = 0; i < (sizeof(*r) / sizeof(uint64_t)); i++) {
+ val = 0;
+ for(size_t j = 0; j < w_size; j++) {
+ mask = (-1ULL) + (!secure_cmp32(pos_qw[j], i));
+ val |= (pos_bit[j] & mask);
+ }
+ a64[i] = val;
+ }
+}
+
+int is_new_port(IN const idx_t *wlist, IN const size_t ctr)
+{
+ for(size_t i = 0; i < ctr; i++) {
+ if(wlist[i] == wlist[ctr]) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
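The constant-time fill above hinges on one identity: (-1ULL) + (!secure_cmp32(a, b)) is all-ones exactly when a == b and zero otherwise, so every QW is OR-ed with either the bit or nothing, without a data-dependent branch. A minimal sketch of that identity; secure_cmp32 is the helper from utilities.h already included above, the demo function is illustrative only.

#include <assert.h>
#include <stdint.h>
#include "utilities.h"

static uint64_t select_bit_if_equal(uint64_t bit, uint32_t a, uint32_t b)
{
    /* mask == 0xffff...f when a == b (secure_cmp32 returns 1), else 0 */
    uint64_t mask = (-1ULL) + (uint64_t)(!secure_cmp32(a, b));
    return bit & mask;
}

static void mask_identity_demo(void)
{
    assert(select_bit_if_equal(0x80, 5, 5) == 0x80); /* equal: mask is all-ones */
    assert(select_bit_if_equal(0x80, 5, 6) == 0x00); /* unequal: mask is zero  */
}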
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sha.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sha.h
new file mode 100644
index 0000000000..1857d6e638
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/sha.h
@@ -0,0 +1,43 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include "cleanup.h"
+#include "error.h"
+#include "types.h"
+#include "utilities.h"
+
+#include <openssl/sha.h>
+
+#define SHA384_DGST_BYTES 48ULL
+#define SHA384_DGST_QWORDS (SHA384_DGST_BYTES / 8)
+
+#define SHA512_DGST_BYTES 64ULL
+#define SHA512_DGST_QWORDS (SHA512_DGST_BYTES / 8)
+
+typedef struct sha384_dgst_s {
+ union {
+ uint8_t raw[SHA384_DGST_BYTES];
+ uint64_t qw[SHA384_DGST_QWORDS];
+ } u;
+} sha384_dgst_t;
+bike_static_assert(sizeof(sha384_dgst_t) == SHA384_DGST_BYTES, sha384_dgst_size);
+
+typedef sha384_dgst_t sha_dgst_t;
+CLEANUP_FUNC(sha_dgst, sha_dgst_t)
+
+_INLINE_ ret_t sha(OUT sha_dgst_t * dgst,
+ IN const uint32_t byte_len,
+ IN const uint8_t *msg)
+{
+ if(SHA384(msg, byte_len, dgst->u.raw) != NULL) {
+ return SUCCESS;
+ }
+
+ return FAIL;
+}
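A minimal usage sketch of the wrapper above. SUCCESS and FAIL are the ret_t values returned by sha() itself; everything else here is local and illustrative.

#include <stdint.h>
#include "sha.h"

static ret_t hash_message_example(void)
{
    const uint8_t msg[] = {0x61, 0x62, 0x63}; /* "abc" */
    sha_dgst_t dgst = {0};

    /* On success, dgst.u.raw holds the 48-byte SHA-384 digest of msg. */
    return sha(&dgst, sizeof(msg), msg);
}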
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/types.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/types.h
new file mode 100644
index 0000000000..436a584f3e
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/types.h
@@ -0,0 +1,120 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "bike_defs.h"
+#include "error.h"
+
+typedef struct uint128_s {
+ union {
+ uint8_t bytes[16]; // NOLINT
+ uint32_t dw[4]; // NOLINT
+ uint64_t qw[2]; // NOLINT
+ } u;
+} uint128_t;
+
+// Pack the structs tightly so the compiler inserts no padding between members.
+#pragma pack(push, 1)
+
+typedef struct seed_s {
+ uint8_t raw[SEED_BYTES];
+} seed_t;
+
+typedef struct seeds_s {
+ seed_t seed[NUM_OF_SEEDS];
+} seeds_t;
+
+typedef struct r_s {
+ uint8_t raw[R_BYTES];
+} r_t;
+
+typedef struct m_s {
+ uint8_t raw[M_BYTES];
+} m_t;
+
+typedef struct e_s {
+ r_t val[N0];
+} e_t;
+
+#define E0_RAW(e) ((e)->val[0].raw)
+#define E1_RAW(e) ((e)->val[1].raw)
+
+typedef struct ct_s {
+ r_t c0;
+ m_t c1;
+} ct_t;
+
+typedef r_t pk_t;
+
+typedef struct ss_st {
+ uint8_t raw[SS_BYTES];
+} ss_t;
+
+typedef uint32_t idx_t;
+
+typedef struct compressed_idx_d_s {
+ idx_t val[DV];
+} compressed_idx_d_t;
+
+typedef compressed_idx_d_t compressed_idx_d_ar_t[N0];
+
+// The secret key holds both representations, to avoid
+// the compression in Decaps.
+typedef struct sk_s {
+ compressed_idx_d_ar_t wlist;
+ r_t bin[N0];
+ pk_t pk;
+ m_t sigma;
+} sk_t;
+
+typedef ALIGN(sizeof(idx_t)) sk_t aligned_sk_t;
+
+// Pad r to the next Block
+typedef struct pad_r_s {
+ r_t val;
+ uint8_t pad[R_PADDED_BYTES - sizeof(r_t)];
+} ALIGN(ALIGN_BYTES) pad_r_t;
+
+// Double padded r, required for multiplication and squaring
+typedef struct dbl_pad_r_s {
+ uint8_t raw[2 * R_PADDED_BYTES];
+} ALIGN(ALIGN_BYTES) dbl_pad_r_t;
+
+typedef struct pad_e_s {
+ pad_r_t val[N0];
+} ALIGN(ALIGN_BYTES) pad_e_t;
+
+#define PE0_RAW(e) ((e)->val[0].val.raw)
+#define PE1_RAW(e) ((e)->val[1].val.raw)
+
+typedef struct func_k_s {
+ m_t m;
+ r_t c0;
+ m_t c1;
+} func_k_t;
+
+// For a faster rotate we triplicate the syndrome (into 3 copies)
+typedef struct syndrome_s {
+ uint64_t qw[3 * R_QWORDS];
+} ALIGN(ALIGN_BYTES) syndrome_t;
+
+typedef struct upc_slice_s {
+ union {
+ pad_r_t r;
+ uint64_t qw[sizeof(pad_r_t) / sizeof(uint64_t)];
+ } ALIGN(ALIGN_BYTES) u;
+} ALIGN(ALIGN_BYTES) upc_slice_t;
+
+typedef struct upc_s {
+ upc_slice_t slice[SLICES];
+} upc_t;
+
+#pragma pack(pop)
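A small hedged illustration of the accessor macros above: PE0_RAW/PE1_RAW address the R_BYTES payload bytes of each half of a padded error vector, and E0_RAW/E1_RAW do the same for the unpadded e_t. The helper is illustrative, not part of the library.

#include <string.h>
#include "types.h"

static void clear_padded_error_vector(pad_e_t *e)
{
    /* Each half is an r_t, i.e. R_BYTES bytes of payload. */
    memset(PE0_RAW(e), 0, R_BYTES);
    memset(PE1_RAW(e), 0, R_BYTES);
}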
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.c
new file mode 100644
index 0000000000..0c6ad3ea0f
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.c
@@ -0,0 +1,24 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#include <inttypes.h>
+
+#include "utilities.h"
+
+#define BITS_IN_QWORD 64ULL
+#define BITS_IN_BYTE 8ULL
+
+uint64_t r_bits_vector_weight(IN const r_t *in)
+{
+ uint64_t acc = 0;
+ for(size_t i = 0; i < (R_BYTES - 1); i++) {
+ acc += __builtin_popcount(in->raw[i]);
+ }
+
+ acc += __builtin_popcount(in->raw[R_BYTES - 1] & LAST_R_BYTE_MASK);
+ return acc;
+}
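A hedged sketch of the final-byte masking pattern above: when a bit vector of length nbits is stored in ceil(nbits/8) bytes, the unused high bits of the last byte must be cleared before counting. LAST_R_BYTE_MASK plays that role for R_BITS; here the mask is derived generically, and the function is illustrative only.

#include <stddef.h>
#include <stdint.h>

static uint64_t bit_vector_weight(const uint8_t *v, size_t nbits)
{
    size_t   full_bytes = nbits / 8;
    uint64_t acc = 0;

    for (size_t i = 0; i < full_bytes; i++) {
        acc += (uint64_t)__builtin_popcount(v[i]);
    }
    if (nbits % 8 != 0) {
        /* Keep only the (nbits % 8) valid low bits of the last byte. */
        uint8_t last_mask = (uint8_t)((1u << (nbits % 8)) - 1);
        acc += (uint64_t)__builtin_popcount(v[full_bytes] & last_mask);
    }
    return acc;
}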
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.h
new file mode 100644
index 0000000000..f544990a1a
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/utilities.h
@@ -0,0 +1,139 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+#pragma once
+
+// For memcpy and memset
+#include <string.h>
+
+#include "types.h"
+
+uint64_t r_bits_vector_weight(IN const r_t *in);
+
+// "VALUE_BARRIER returns |a|, but prevents GCC and Clang from reasoning about
+// the returned value. This is used to mitigate compilers undoing constant-time
+// code, until we can express our requirements directly in the language.
+// Note the compiler is aware that |VALUE_BARRIER| has no side effects and
+// always has the same output for a given input. This allows it to eliminate
+// dead code, move computations across loops, and vectorize."
+// See:
+// https://github.com/google/boringssl/commit/92b7c89e6e8ba82924b57153bea68241cc45f658
+#if(defined(__GNUC__) || defined(__clang__))
+# define VALUE_BARRIER(name, type) \
+ _INLINE_ type name##_barrier(type a) \
+ { \
+ __asm__("" : "+r"(a) : /* no inputs */); \
+ return a; \
+ }
+#else
+# define VALUE_BARRIER(name, type) \
+ _INLINE_ type name##_barrier(type a) { return a; }
+#endif
+
+VALUE_BARRIER(u8, uint8_t)
+VALUE_BARRIER(u32, uint32_t)
+VALUE_BARRIER(u64, uint64_t)
+
+// Comparing value in a constant time manner
+_INLINE_ uint32_t secure_cmp(IN const uint8_t *a,
+ IN const uint8_t *b,
+ IN const uint32_t size)
+{
+ volatile uint8_t res = 0;
+
+ for(uint32_t i = 0; i < size; ++i) {
+ res |= (a[i] ^ b[i]);
+ }
+
+ return (0 == res);
+}
+
+// Return 1 if the arguments are equal to each other. Return 0 otherwise.
+_INLINE_ uint32_t secure_cmp32(IN const uint32_t v1, IN const uint32_t v2)
+{
+#if defined(__aarch64__)
+ uint32_t res;
+ __asm__ __volatile__("cmp %w[V1], %w[V2]; \n "
+ "cset %w[RES], EQ; \n"
+ : [RES] "=r"(res)
+ : [V1] "r"(v1), [V2] "r"(v2)
+ : "cc" /*The condition code flag*/);
+ return res;
+#elif defined(__x86_64__) || defined(__i386__)
+ uint32_t res;
+ __asm__ __volatile__("xor %%edx, %%edx; \n"
+ "cmp %1, %2; \n "
+ "sete %%dl; \n"
+ "mov %%edx, %0; \n"
+ : "=r"(res)
+ : "r"(v1), "r"(v2)
+ : "rdx");
+ return res;
+#else
+ // Insecure comparison: The main purpose of secure_cmp32 is to avoid
+ // branches to prevent potential side channel leaks. To do that,
+ // we normally leverage some special CPU instructions such as "sete"
+ // (for __x86_64__) and "cset" (for __aarch64__). When dealing with general
+ // CPU architectures, the interpretation of the line below is left for the
+ // compiler. It could lead to an "insecure" branch. This case needs to be
+ // checked individually on such platforms
+ // (e.g., by checking the compiler-generated assembly).
+ return (v1 == v2 ? 1 : 0);
+#endif
+}
+
+// Return 0 if v1 < v2, (-1) otherwise
+_INLINE_ uint32_t secure_l32_mask(IN const uint32_t v1, IN const uint32_t v2)
+{
+#if defined(__aarch64__)
+ uint32_t res;
+ __asm__ __volatile__("cmp %w[V2], %w[V1]; \n "
+ "cset %w[RES], HI; \n"
+ : [RES] "=r"(res)
+ : [V1] "r"(v1), [V2] "r"(v2)
+ : "cc" /*The condition code flag*/);
+ return (res - 1);
+#elif defined(__x86_64__) || defined(__i386__)
+ uint32_t res;
+ __asm__ __volatile__("xor %%edx, %%edx; \n"
+ "cmp %1, %2; \n "
+ "setl %%dl; \n"
+ "dec %%edx; \n"
+ "mov %%edx, %0; \n"
+
+ : "=r"(res)
+ : "r"(v2), "r"(v1)
+ : "rdx");
+
+ return res;
+#else
+ // If v1 >= v2 then the 64-bit subtraction result is 0^32||(v1-v2);
+ // otherwise it is 1^32||(2^32-(v2-v1)). Complementing the upper
+ // 32 bits therefore gives 0 if v1 < v2 and (-1) otherwise.
+ return ~((uint32_t)(((uint64_t)v1 - (uint64_t)v2) >> 32));
+#endif
+}
+
+// bike_memcpy avoids the undefined behaviour of memcpy when byte_len=0
+_INLINE_ void *bike_memcpy(void *dst, const void *src, size_t byte_len)
+{
+ if(byte_len == 0) {
+ return dst;
+ }
+
+ return memcpy(dst, src, byte_len);
+}
+
+// bike_memset avoids the undefined behaviour of memset when byte_len=0
+_INLINE_ void *bike_memset(void *dst, const int ch, size_t byte_len)
+{
+ if(byte_len == 0) {
+ return dst;
+ }
+
+ return memset(dst, ch, byte_len);
+}
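A hedged sketch of how the masks above compose into branch-free selection: secure_l32_mask(a, b) is 0 when a < b and all-ones otherwise, so it can act as a selector, while u32_barrier keeps the compiler from folding the selection back into a branch. ct_min_u32 is illustrative, not part of the library.

#include <stdint.h>
#include "utilities.h"

static inline uint32_t ct_min_u32(uint32_t a, uint32_t b)
{
    /* a_ge_b is 0 iff a < b, all-ones otherwise. */
    uint32_t a_ge_b = u32_barrier(secure_l32_mask(a, b));

    /* Pick a when a < b, b otherwise, without a data-dependent branch. */
    return (a & ~a_ge_b) | (b & a_ge_b);
}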
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r3/x86_64_intrinsic.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/x86_64_intrinsic.h
new file mode 100644
index 0000000000..b5c1e989bd
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r3/x86_64_intrinsic.h
@@ -0,0 +1,132 @@
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker, Shay Gueron and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ */
+
+// This file contains definitions of macros for SIMD intrinsic functions, used
+// throughout the code package. Where necessary, we add a suffix to a macro,
+// and denote the type of the elements (operands). For example,
+// - I16 denotes 16-bit wide integers,
+// - U64 denotes 64-bit wide unsigned integers.
+
+#pragma once
+
+#if defined(S2N_BIKE_R3_AVX2) || defined(S2N_BIKE_R3_AVX512)
+# include <immintrin.h>
+#endif
+
+// clang 3.9 doesn't recognize this macro
+#if !defined(_MM_CMPINT_EQ)
+# define _MM_CMPINT_EQ (0)
+#endif
+
+// For functions in gf2x_mul.c we use exactly the same code for
+// PORTABLE, AVX2, AVX512 implementations. Based on the implementation,
+// we define macros for the different data types (uint64_t, __m256i, __m512i),
+// and all the required operations (LOAD, STORE, >>, <<) on these types.
+#if defined(AVX2_INTERNAL)
+
+# define REG_T __m256i
+
+# define LOAD(mem) _mm256_loadu_si256((const void *)(mem))
+# define STORE(mem, reg) _mm256_storeu_si256((void *)(mem), (reg))
+
+# define SLLI_I64(a, imm) _mm256_slli_epi64(a, imm)
+# define SRLI_I64(a, imm) _mm256_srli_epi64(a, imm)
+
+#elif defined(AVX512_INTERNAL)
+
+# define REG_T __m512i
+
+# define LOAD(mem) _mm512_loadu_si512((mem))
+# define STORE(mem, reg) _mm512_storeu_si512((mem), (reg))
+
+# define SLLI_I64(a, imm) _mm512_slli_epi64(a, imm)
+# define SRLI_I64(a, imm) _mm512_srli_epi64(a, imm)
+
+#elif defined(PORTABLE_INTERNAL)
+
+# define REG_T uint64_t
+
+# define LOAD(mem) (mem)[0]
+# define STORE(mem, val) (mem)[0] = val
+
+# define SLLI_I64(a, imm) ((a) << (imm))
+# define SRLI_I64(a, imm) ((a) >> (imm))
+
+#endif
+
+// NOLINT is used to avoid the sizeof(T)/sizeof(T) warning when REG_T is defined
+// to be uint64_t
+#define REG_QWORDS (sizeof(REG_T) / sizeof(uint64_t)) // NOLINT
+#define REG_DWORDS (sizeof(REG_T) / sizeof(uint32_t)) // NOLINT
+
+// The rest of the SIMD macros
+// required by the AVX2 and AVX512 implementations.
+#if defined(AVX2_INTERNAL)
+
+# define SET_I8(...) _mm256_set_epi8(__VA_ARGS__)
+# define SET_I32(...) _mm256_set_epi32(__VA_ARGS__)
+# define SET_I64(...) _mm256_set_epi64x(__VA_ARGS__)
+# define SET1_I8(a) _mm256_set1_epi8(a)
+# define SET1_I16(a) _mm256_set1_epi16(a)
+# define SET1_I32(a) _mm256_set1_epi32(a)
+# define SET1_I64(a) _mm256_set1_epi64x(a)
+# define SET_ZERO _mm256_setzero_si256()
+
+# define ADD_I8(a, b) _mm256_add_epi8(a, b)
+# define SUB_I8(a, b) _mm256_sub_epi8(a, b)
+# define ADD_I16(a, b) _mm256_add_epi16(a, b)
+# define SUB_I16(a, b) _mm256_sub_epi16(a, b)
+# define ADD_I64(a, b) _mm256_add_epi64(a, b)
+# define SRLI_I16(a, imm) _mm256_srli_epi16(a, imm)
+# define SLLI_I32(a, imm) _mm256_slli_epi32(a, imm)
+# define SLLV_I32(a, b) _mm256_sllv_epi32(a, b)
+
+# define CMPGT_I16(a, b) _mm256_cmpgt_epi16(a, b)
+# define CMPEQ_I16(a, b) _mm256_cmpeq_epi16(a, b)
+# define CMPEQ_I32(a, b) _mm256_cmpeq_epi32(a, b)
+# define CMPEQ_I64(a, b) _mm256_cmpeq_epi64(a, b)
+
+# define SHUF_I8(a, b) _mm256_shuffle_epi8(a, b)
+# define BLENDV_I8(a, b, mask) _mm256_blendv_epi8(a, b, mask)
+# define PERMVAR_I32(a, idx) _mm256_permutevar8x32_epi32(a, idx)
+# define PERM_I64(a, imm) _mm256_permute4x64_epi64(a, imm)
+
+# define MOVEMASK(a) _mm256_movemask_epi8(a)
+
+#elif defined(AVX512_INTERNAL)
+
+# define MSTORE(mem, mask, reg) _mm512_mask_store_epi64((mem), (mask), (reg))
+
+# define SET1_I8(a) _mm512_set1_epi8(a)
+# define SET1_I32(a) _mm512_set1_epi32(a)
+# define SET1_I64(a) _mm512_set1_epi64(a)
+# define SET1MZ_I8(mask, a) _mm512_maskz_set1_epi8(mask, a)
+# define SET1_I16(a) _mm512_set1_epi16(a)
+# define SET_I64(...) _mm512_set_epi64(__VA_ARGS__)
+# define SET_ZERO _mm512_setzero_si512()
+
+# define ADD_I16(a, b) _mm512_add_epi16(a, b)
+# define ADD_I64(a, b) _mm512_add_epi64(a, b)
+# define MSUB_I16(src, k, a, b) _mm512_mask_sub_epi16(src, k, a, b)
+# define SRLI_I16(a, imm) _mm512_srli_epi16(a, imm)
+# define SRLV_I64(a, cnt) _mm512_srlv_epi64(a, cnt)
+# define SLLV_I64(a, cnt) _mm512_sllv_epi64(a, cnt)
+# define MOR_I64(src, mask, a, b) _mm512_mask_or_epi64(src, mask, a, b)
+# define MXOR_I64(src, mask, a, b) _mm512_mask_xor_epi64(src, mask, a, b)
+# define VALIGN(a, b, count) _mm512_alignr_epi64(a, b, count)
+
+# define CMPM_U8(a, b, cmp_op) _mm512_cmp_epu8_mask(a, b, cmp_op)
+# define CMPM_U16(a, b, cmp_op) _mm512_cmp_epu16_mask(a, b, cmp_op)
+# define CMPMEQ_I64(a, b) _mm512_cmp_epi64_mask(a, b, _MM_CMPINT_EQ)
+# define MCMPMEQ_I32(mask, a, b) \
+ _mm512_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_EQ)
+
+# define PERMX_I64(a, imm) _mm512_permutex_epi64(a, imm)
+# define PERMX2VAR_I64(a, idx, b) _mm512_permutex2var_epi64(a, idx, b)
+# define PERMXVAR_I64(idx, a) _mm512_permutexvar_epi64(idx, a)
+
+#endif
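A hedged sketch of why the REG_T/LOAD/STORE/REG_QWORDS layer exists: one loop body serves all three builds. Compiled with PORTABLE_INTERNAL it moves one QW per iteration; with AVX2_INTERNAL or AVX512_INTERNAL the same source moves 4 or 8 QWs per iteration. The helper assumes one of those three macros is defined and that qwords is a multiple of REG_QWORDS; it is illustrative, not part of the file.

#include <stddef.h>
#include <stdint.h>
#include "x86_64_intrinsic.h"

static inline void copy_qwords(uint64_t *dst, const uint64_t *src, size_t qwords)
{
    for (size_t i = 0; i < qwords; i += REG_QWORDS) {
        /* One iteration copies 1, 4 or 8 QWs depending on the build. */
        STORE(&dst[i], LOAD(&src[i]));
    }
}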
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/indcpa.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/indcpa.c
index c37548326d..4c520b693f 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/indcpa.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/indcpa.c
@@ -188,7 +188,7 @@ int PQCLEAN_KYBER51290S_CLEAN_indcpa_keypair(uint8_t *pk, uint8_t *sk) {
uint8_t *noiseseed = buf + KYBER_SYMBYTES;
uint8_t nonce = 0;
- GUARD_AS_POSIX(s2n_get_random_bytes(buf, KYBER_SYMBYTES));
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(buf, KYBER_SYMBYTES));
hash_g(buf, buf, KYBER_SYMBYTES);
gen_a(a, publicseed);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/kyber_90s_r2_kem.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/kyber_90s_r2_kem.c
index 9de3c1daef..5b4c088b11 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/kyber_90s_r2_kem.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/kyber_90s_r2_kem.c
@@ -22,14 +22,14 @@
* Returns 0 (success)
**************************************************/
int kyber_512_90s_r2_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) {
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
size_t i;
PQCLEAN_KYBER51290S_CLEAN_indcpa_keypair(pk, sk);
for (i = 0; i < KYBER_INDCPA_PUBLICKEYBYTES; i++) {
sk[i + KYBER_INDCPA_SECRETKEYBYTES] = pk[i];
}
hash_h(sk + KYBER_SECRETKEYBYTES - 2 * KYBER_SYMBYTES, pk, KYBER_PUBLICKEYBYTES);
- GUARD_AS_POSIX(s2n_get_random_bytes(sk + KYBER_SECRETKEYBYTES - KYBER_SYMBYTES, KYBER_SYMBYTES)); /* Value z for pseudo-random output on reject */
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(sk + KYBER_SECRETKEYBYTES - KYBER_SYMBYTES, KYBER_SYMBYTES)); /* Value z for pseudo-random output on reject */
return 0;
}
@@ -46,11 +46,11 @@ int kyber_512_90s_r2_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) {
* Returns 0 (success)
**************************************************/
int kyber_512_90s_r2_crypto_kem_enc(uint8_t *ct, uint8_t *ss, const uint8_t *pk) {
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
uint8_t kr[2 * KYBER_SYMBYTES]; /* Will contain key, coins */
uint8_t buf[2 * KYBER_SYMBYTES];
- GUARD_AS_POSIX(s2n_get_random_bytes(buf, KYBER_SYMBYTES));
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(buf, KYBER_SYMBYTES));
hash_h(buf, buf, KYBER_SYMBYTES); /* Don't release system RNG output */
hash_h(buf + KYBER_SYMBYTES, pk, KYBER_PUBLICKEYBYTES); /* Multitarget countermeasure for coins + contributory KEM */
@@ -78,7 +78,7 @@ int kyber_512_90s_r2_crypto_kem_enc(uint8_t *ct, uint8_t *ss, const uint8_t *pk)
* On failure, ss will contain a pseudo-random value.
**************************************************/
int kyber_512_90s_r2_crypto_kem_dec(uint8_t *ss, const uint8_t *ct, const uint8_t *sk) {
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
size_t i;
uint8_t fail;
uint8_t cmp[KYBER_CIPHERTEXTBYTES];
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/ntt.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/ntt.h
index 720bee975a..66fc5a9484 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/ntt.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_90s_r2/ntt.h
@@ -6,8 +6,8 @@
extern const int16_t PQCLEAN_KYBER51290S_CLEAN_zetas[128];
extern const int16_t PQCLEAN_KYBER51290S_CLEAN_zetasinv[128];
-void PQCLEAN_KYBER51290S_CLEAN_ntt(int16_t *poly);
-void PQCLEAN_KYBER51290S_CLEAN_invntt(int16_t *poly);
+void PQCLEAN_KYBER51290S_CLEAN_ntt(int16_t poly[256]);
+void PQCLEAN_KYBER51290S_CLEAN_invntt(int16_t poly[256]);
void PQCLEAN_KYBER51290S_CLEAN_basemul(int16_t r[2], const int16_t a[2], const int16_t b[2], int16_t zeta);
#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/indcpa.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/indcpa.c
index 233b5d8515..1b76bb9b0c 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/indcpa.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/indcpa.c
@@ -188,7 +188,7 @@ int PQCLEAN_KYBER512_CLEAN_indcpa_keypair(uint8_t *pk, uint8_t *sk) {
uint8_t *noiseseed = buf + KYBER_SYMBYTES;
uint8_t nonce = 0;
- GUARD_AS_POSIX(s2n_get_random_bytes(buf, KYBER_SYMBYTES));
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(buf, KYBER_SYMBYTES));
hash_g(buf, buf, KYBER_SYMBYTES);
gen_a(a, publicseed);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/kyber_r2_kem.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/kyber_r2_kem.c
index 9871084bb4..140ec352d4 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/kyber_r2_kem.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/kyber_r2_kem.c
@@ -22,14 +22,14 @@
* Returns 0 (success)
**************************************************/
int kyber_512_r2_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) {
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
size_t i;
PQCLEAN_KYBER512_CLEAN_indcpa_keypair(pk, sk);
for (i = 0; i < KYBER_INDCPA_PUBLICKEYBYTES; i++) {
sk[i + KYBER_INDCPA_SECRETKEYBYTES] = pk[i];
}
hash_h(sk + KYBER_SECRETKEYBYTES - 2 * KYBER_SYMBYTES, pk, KYBER_PUBLICKEYBYTES);
- GUARD_AS_POSIX(s2n_get_random_bytes(sk + KYBER_SECRETKEYBYTES - KYBER_SYMBYTES, KYBER_SYMBYTES)); /* Value z for pseudo-random output on reject */
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(sk + KYBER_SECRETKEYBYTES - KYBER_SYMBYTES, KYBER_SYMBYTES)); /* Value z for pseudo-random output on reject */
return 0;
}
@@ -46,11 +46,11 @@ int kyber_512_r2_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) {
* Returns 0 (success)
**************************************************/
int kyber_512_r2_crypto_kem_enc(uint8_t *ct, uint8_t *ss, const uint8_t *pk) {
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
uint8_t kr[2 * KYBER_SYMBYTES]; /* Will contain key, coins */
uint8_t buf[2 * KYBER_SYMBYTES];
- GUARD_AS_POSIX(s2n_get_random_bytes(buf, KYBER_SYMBYTES));
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(buf, KYBER_SYMBYTES));
hash_h(buf, buf, KYBER_SYMBYTES); /* Don't release system RNG output */
hash_h(buf + KYBER_SYMBYTES, pk, KYBER_PUBLICKEYBYTES); /* Multitarget countermeasure for coins + contributory KEM */
@@ -78,7 +78,7 @@ int kyber_512_r2_crypto_kem_enc(uint8_t *ct, uint8_t *ss, const uint8_t *pk) {
* On failure, ss will contain a pseudo-random value.
**************************************************/
int kyber_512_r2_crypto_kem_dec(uint8_t *ss, const uint8_t *ct, const uint8_t *sk) {
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
size_t i;
uint8_t fail;
uint8_t cmp[KYBER_CIPHERTEXTBYTES];
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/ntt.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/ntt.h
index 13e976f7d0..7885e7cdc6 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/ntt.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r2/ntt.h
@@ -6,8 +6,8 @@
extern const int16_t PQCLEAN_KYBER512_CLEAN_zetas[128];
extern const int16_t PQCLEAN_KYBER512_CLEAN_zetasinv[128];
-void PQCLEAN_KYBER512_CLEAN_ntt(int16_t *poly);
-void PQCLEAN_KYBER512_CLEAN_invntt(int16_t *poly);
+void PQCLEAN_KYBER512_CLEAN_ntt(int16_t poly[256]);
+void PQCLEAN_KYBER512_CLEAN_invntt(int16_t poly[256]);
void PQCLEAN_KYBER512_CLEAN_basemul(int16_t r[2], const int16_t a[2], const int16_t b[2], int16_t zeta);
#endif
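A short hedged aside on the two ntt.h prototype changes above: in C a parameter declared as int16_t poly[256] still decays to int16_t *, so the edit tightens the documented contract without changing the type or ABI. Callers passing a full 256-element array are unaffected, and compilers can warn when a visibly smaller array is passed. The names below are hypothetical, for illustration only.

#include <stdint.h>

/* Same function type as: void ntt_like(int16_t *poly). */
static void ntt_like(int16_t poly[256]) { poly[0] ^= 1; } /* stand-in body */

static void caller(void)
{
    int16_t coeffs[256] = {0};
    ntt_like(coeffs); /* the array argument decays to &coeffs[0] as before */
}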
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c
new file mode 100644
index 0000000000..349442f65c
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SIMD256_avx2.c
@@ -0,0 +1,1284 @@
+/*
+Implementation by the Keccak, Keyak and Ketje Teams, namely, Guido Bertoni,
+Joan Daemen, Michaël Peeters, Gilles Van Assche and Ronny Van Keer, hereby
+denoted as "the implementer".
+
+For more information, feedback or questions, please refer to our websites:
+http://keccak.noekeon.org/
+http://keyak.noekeon.org/
+http://ketje.noekeon.org/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+// extra headers are removed: smmintrin.h, wmmintrin.h and emmintrin.h
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+#include "KeccakP-align_avx2.h"
+#include "KeccakP-1600-times4-SnP_avx2.h"
+#include "KeccakP-SIMD256-config_avx2.h"
+
+#include "KeccakP-brg_endian_avx2.h"
+#if (PLATFORM_BYTE_ORDER != IS_LITTLE_ENDIAN)
+#error Expecting a little-endian platform
+#endif
+
+typedef unsigned char UINT8;
+typedef unsigned long long int UINT64;
+typedef __m128i V128;
+typedef __m256i V256;
+
+#define laneIndex(instanceIndex, lanePosition) ((lanePosition)*4 + instanceIndex)
+
+#if defined(KeccakP1600times4_useAVX2)
+ #define ANDnu256(a, b) _mm256_andnot_si256(a, b)
+ // correcting cast-align error
+ // old version: #define CONST256(a) _mm256_load_si256((const V256 *)&(a))
+ #define CONST256(a) _mm256_load_si256((const void *)&(a))
+ #define CONST256_64(a) (V256)_mm256_broadcast_sd((const double*)(&a))
+ #define LOAD256(a) _mm256_load_si256((const V256 *)&(a))
+ // correcting cast-align error
+ // old version: #define LOAD256u(a) _mm256_loadu_si256((const V256 *)&(a))
+ #define LOAD256u(a) _mm256_loadu_si256((const void *)&(a))
+ #define LOAD4_64(a, b, c, d) _mm256_set_epi64x((UINT64)(a), (UINT64)(b), (UINT64)(c), (UINT64)(d))
+ #define ROL64in256(d, a, o) d = _mm256_or_si256(_mm256_slli_epi64(a, o), _mm256_srli_epi64(a, 64-(o)))
+ #define ROL64in256_8(d, a) d = _mm256_shuffle_epi8(a, CONST256(rho8))
+ #define ROL64in256_56(d, a) d = _mm256_shuffle_epi8(a, CONST256(rho56))
+static const UINT64 rho8[4] = {0x0605040302010007, 0x0E0D0C0B0A09080F, 0x1615141312111017, 0x1E1D1C1B1A19181F};
+static const UINT64 rho56[4] = {0x0007060504030201, 0x080F0E0D0C0B0A09, 0x1017161514131211, 0x181F1E1D1C1B1A19};
+ #define STORE256(a, b) _mm256_store_si256((V256 *)&(a), b)
+ // correcting cast-align error
+ // old version: #define STORE256u(a, b) _mm256_storeu_si256((V256 *)&(a), b)
+ #define STORE256u(a, b) _mm256_storeu_si256((void *)&(a), b)
+ #define STORE2_128(ah, al, v) _mm256_storeu2_m128d((V128*)&(ah), (V128*)&(al), v)
+ #define XOR256(a, b) _mm256_xor_si256(a, b)
+ #define XOReq256(a, b) a = _mm256_xor_si256(a, b)
+ #define UNPACKL( a, b ) _mm256_unpacklo_epi64((a), (b))
+ #define UNPACKH( a, b ) _mm256_unpackhi_epi64((a), (b))
+ #define PERM128( a, b, c ) (V256)_mm256_permute2f128_ps((__m256)(a), (__m256)(b), c)
+ #define SHUFFLE64( a, b, c ) (V256)_mm256_shuffle_pd((__m256d)(a), (__m256d)(b), c)
+
+ #define UNINTLEAVE() lanesL01 = UNPACKL( lanes0, lanes1 ), \
+ lanesH01 = UNPACKH( lanes0, lanes1 ), \
+ lanesL23 = UNPACKL( lanes2, lanes3 ), \
+ lanesH23 = UNPACKH( lanes2, lanes3 ), \
+ lanes0 = PERM128( lanesL01, lanesL23, 0x20 ), \
+ lanes2 = PERM128( lanesL01, lanesL23, 0x31 ), \
+ lanes1 = PERM128( lanesH01, lanesH23, 0x20 ), \
+ lanes3 = PERM128( lanesH01, lanesH23, 0x31 )
+
+ #define INTLEAVE() lanesL01 = PERM128( lanes0, lanes2, 0x20 ), \
+ lanesH01 = PERM128( lanes1, lanes3, 0x20 ), \
+ lanesL23 = PERM128( lanes0, lanes2, 0x31 ), \
+ lanesH23 = PERM128( lanes1, lanes3, 0x31 ), \
+ lanes0 = SHUFFLE64( lanesL01, lanesH01, 0x00 ), \
+ lanes1 = SHUFFLE64( lanesL01, lanesH01, 0x0F ), \
+ lanes2 = SHUFFLE64( lanesL23, lanesH23, 0x00 ), \
+ lanes3 = SHUFFLE64( lanesL23, lanesH23, 0x0F )
+
+#endif
+
+#define SnP_laneLengthInBytes 8
+
+void KeccakP1600times4_InitializeAll(void *states)
+{
+ memset(states, 0, KeccakP1600times4_statesSizeInBytes);
+}
+
+void KeccakP1600times4_AddBytes(void *states, unsigned int instanceIndex, const unsigned char *data, unsigned int offset, unsigned int length)
+{
+ unsigned int sizeLeft = length;
+ unsigned int lanePosition = offset/SnP_laneLengthInBytes;
+ unsigned int offsetInLane = offset%SnP_laneLengthInBytes;
+ const unsigned char *curData = data;
+ UINT64 *statesAsLanes = (UINT64 *)states;
+
+ if ((sizeLeft > 0) && (offsetInLane != 0)) {
+ unsigned int bytesInLane = SnP_laneLengthInBytes - offsetInLane;
+ UINT64 lane = 0;
+ if (bytesInLane > sizeLeft)
+ bytesInLane = sizeLeft;
+ memcpy((unsigned char*)&lane + offsetInLane, curData, bytesInLane);
+ statesAsLanes[laneIndex(instanceIndex, lanePosition)] ^= lane;
+ sizeLeft -= bytesInLane;
+ lanePosition++;
+ curData += bytesInLane;
+ }
+
+ while(sizeLeft >= SnP_laneLengthInBytes) {
+ // correcting cast-align error
+ // old version: UINT64 lane = *((const UINT64*)curData);
+ UINT64 lane = *((const UINT64*)(const void *)curData);
+ statesAsLanes[laneIndex(instanceIndex, lanePosition)] ^= lane;
+ sizeLeft -= SnP_laneLengthInBytes;
+ lanePosition++;
+ curData += SnP_laneLengthInBytes;
+ }
+
+ if (sizeLeft > 0) {
+ UINT64 lane = 0;
+ memcpy(&lane, curData, sizeLeft);
+ statesAsLanes[laneIndex(instanceIndex, lanePosition)] ^= lane;
+ }
+}
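A hedged note on the state layout used by AddBytes above: the four Keccak instances are interleaved lane by lane, so lane p of instance n lives at qword index p*4 + n, which is exactly what the laneIndex macro computes. The helper below only makes the corresponding byte offset explicit; it is illustrative, not part of the file.

static size_t lane_byte_offset(unsigned int instanceIndex, unsigned int lanePosition)
{
    return (size_t)laneIndex(instanceIndex, lanePosition) * SnP_laneLengthInBytes;
}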
+
+void KeccakP1600times4_AddLanesAll(void *states, const unsigned char *data, unsigned int laneCount, unsigned int laneOffset)
+{
+ V256 *stateAsLanes = (V256 *)states;
+ unsigned int i;
+ // correcting cast-align errors
+ // old version: const UINT64 *curData0 = (const UINT64 *)data;
+ const UINT64 *curData0 = (const void *)data;
+ // old version: const UINT64 *curData1 = (const UINT64 *)(data+laneOffset*SnP_laneLengthInBytes);
+ const UINT64 *curData1 = (const void *)(data+laneOffset*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curData2 = (const UINT64 *)(data+laneOffset*2*SnP_laneLengthInBytes);
+ const UINT64 *curData2 = (const void *)(data+laneOffset*2*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curData3 = (const UINT64 *)(data+laneOffset*3*SnP_laneLengthInBytes);
+ const UINT64 *curData3 = (const void *)(data+laneOffset*3*SnP_laneLengthInBytes);
+ V256 lanes0, lanes1, lanes2, lanes3, lanesL01, lanesL23, lanesH01, lanesH23;
+
+ #define Xor_In( argIndex ) XOReq256(stateAsLanes[argIndex], LOAD4_64(curData3[argIndex], curData2[argIndex], curData1[argIndex], curData0[argIndex]))
+
+ #define Xor_In4( argIndex ) lanes0 = LOAD256u( curData0[argIndex]),\
+ lanes1 = LOAD256u( curData1[argIndex]),\
+ lanes2 = LOAD256u( curData2[argIndex]),\
+ lanes3 = LOAD256u( curData3[argIndex]),\
+ INTLEAVE(),\
+ XOReq256( stateAsLanes[argIndex+0], lanes0 ),\
+ XOReq256( stateAsLanes[argIndex+1], lanes1 ),\
+ XOReq256( stateAsLanes[argIndex+2], lanes2 ),\
+ XOReq256( stateAsLanes[argIndex+3], lanes3 )
+
+ if ( laneCount >= 16 ) {
+ Xor_In4( 0 );
+ Xor_In4( 4 );
+ Xor_In4( 8 );
+ Xor_In4( 12 );
+ if ( laneCount >= 20 ) {
+ Xor_In4( 16 );
+ for(i=20; i<laneCount; i++)
+ Xor_In( i );
+ }
+ else {
+ for(i=16; i<laneCount; i++)
+ Xor_In( i );
+ }
+ }
+ else {
+ for(i=0; i<laneCount; i++)
+ Xor_In( i );
+ }
+ #undef Xor_In
+ #undef Xor_In4
+}
+
+void KeccakP1600times4_OverwriteBytes(void *states, unsigned int instanceIndex, const unsigned char *data, unsigned int offset, unsigned int length)
+{
+ unsigned int sizeLeft = length;
+ unsigned int lanePosition = offset/SnP_laneLengthInBytes;
+ unsigned int offsetInLane = offset%SnP_laneLengthInBytes;
+ const unsigned char *curData = data;
+ UINT64 *statesAsLanes = (UINT64 *)states;
+
+ if ((sizeLeft > 0) && (offsetInLane != 0)) {
+ unsigned int bytesInLane = SnP_laneLengthInBytes - offsetInLane;
+ if (bytesInLane > sizeLeft)
+ bytesInLane = sizeLeft;
+ memcpy( ((unsigned char *)&statesAsLanes[laneIndex(instanceIndex, lanePosition)]) + offsetInLane, curData, bytesInLane);
+ sizeLeft -= bytesInLane;
+ lanePosition++;
+ curData += bytesInLane;
+ }
+
+ while(sizeLeft >= SnP_laneLengthInBytes) {
+ // correcting cast-align error
+ // old version: UINT64 lane = *((const UINT64*)curData);
+ UINT64 lane = *((const UINT64*)(const void*)curData);
+ statesAsLanes[laneIndex(instanceIndex, lanePosition)] = lane;
+ sizeLeft -= SnP_laneLengthInBytes;
+ lanePosition++;
+ curData += SnP_laneLengthInBytes;
+ }
+
+ if (sizeLeft > 0) {
+ memcpy(&statesAsLanes[laneIndex(instanceIndex, lanePosition)], curData, sizeLeft);
+ }
+}
+
+void KeccakP1600times4_OverwriteLanesAll(void *states, const unsigned char *data, unsigned int laneCount, unsigned int laneOffset)
+{
+ V256 *stateAsLanes = (V256 *)states;
+ unsigned int i;
+ // correcting cast-align errors
+ // old version: const UINT64 *curData0 = (const UINT64 *)data;
+ const UINT64 *curData0 = (const void *)data;
+ // old version: const UINT64 *curData1 = (const UINT64 *)(data+laneOffset*SnP_laneLengthInBytes);
+ const UINT64 *curData1 = (const void *)(data+laneOffset*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curData2 = (const UINT64 *)(data+laneOffset*2*SnP_laneLengthInBytes);
+ const UINT64 *curData2 = (const void *)(data+laneOffset*2*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curData3 = (const UINT64 *)(data+laneOffset*3*SnP_laneLengthInBytes);
+ const UINT64 *curData3 = (const void *)(data+laneOffset*3*SnP_laneLengthInBytes);
+ V256 lanes0, lanes1, lanes2, lanes3, lanesL01, lanesL23, lanesH01, lanesH23;
+
+ #define OverWr( argIndex ) STORE256(stateAsLanes[argIndex], LOAD4_64(curData3[argIndex], curData2[argIndex], curData1[argIndex], curData0[argIndex]))
+
+ #define OverWr4( argIndex ) lanes0 = LOAD256u( curData0[argIndex]),\
+ lanes1 = LOAD256u( curData1[argIndex]),\
+ lanes2 = LOAD256u( curData2[argIndex]),\
+ lanes3 = LOAD256u( curData3[argIndex]),\
+ INTLEAVE(),\
+ STORE256( stateAsLanes[argIndex+0], lanes0 ),\
+ STORE256( stateAsLanes[argIndex+1], lanes1 ),\
+ STORE256( stateAsLanes[argIndex+2], lanes2 ),\
+ STORE256( stateAsLanes[argIndex+3], lanes3 )
+
+ if ( laneCount >= 16 ) {
+ OverWr4( 0 );
+ OverWr4( 4 );
+ OverWr4( 8 );
+ OverWr4( 12 );
+ if ( laneCount >= 20 ) {
+ OverWr4( 16 );
+ for(i=20; i<laneCount; i++)
+ OverWr( i );
+ }
+ else {
+ for(i=16; i<laneCount; i++)
+ OverWr( i );
+ }
+ }
+ else {
+ for(i=0; i<laneCount; i++)
+ OverWr( i );
+ }
+ #undef OverWr
+ #undef OverWr4
+}
+
+void KeccakP1600times4_OverwriteWithZeroes(void *states, unsigned int instanceIndex, unsigned int byteCount)
+{
+ unsigned int sizeLeft = byteCount;
+ unsigned int lanePosition = 0;
+ UINT64 *statesAsLanes = (UINT64 *)states;
+
+ while(sizeLeft >= SnP_laneLengthInBytes) {
+ statesAsLanes[laneIndex(instanceIndex, lanePosition)] = 0;
+ sizeLeft -= SnP_laneLengthInBytes;
+ lanePosition++;
+ }
+
+ if (sizeLeft > 0) {
+ memset(&statesAsLanes[laneIndex(instanceIndex, lanePosition)], 0, sizeLeft);
+ }
+}
+
+void KeccakP1600times4_ExtractBytes(const void *states, unsigned int instanceIndex, unsigned char *data, unsigned int offset, unsigned int length)
+{
+ unsigned int sizeLeft = length;
+ unsigned int lanePosition = offset/SnP_laneLengthInBytes;
+ unsigned int offsetInLane = offset%SnP_laneLengthInBytes;
+ unsigned char *curData = data;
+ const UINT64 *statesAsLanes = (const UINT64 *)states;
+
+ if ((sizeLeft > 0) && (offsetInLane != 0)) {
+ unsigned int bytesInLane = SnP_laneLengthInBytes - offsetInLane;
+ if (bytesInLane > sizeLeft)
+ bytesInLane = sizeLeft;
+ // correcting cast-qual error
+ // old version: memcpy( curData, ((unsigned char *)&statesAsLanes[laneIndex(instanceIndex, lanePosition)]) + offsetInLane, bytesInLane);
+ memcpy( curData, ((const unsigned char *)&statesAsLanes[laneIndex(instanceIndex, lanePosition)]) + offsetInLane, bytesInLane);
+ sizeLeft -= bytesInLane;
+ lanePosition++;
+ curData += bytesInLane;
+ }
+
+ while(sizeLeft >= SnP_laneLengthInBytes) {
+ // correcting cast-align error
+ // old version: *(UINT64*)curData = statesAsLanes[laneIndex(instanceIndex, lanePosition)];
+ *(UINT64*)(void*)curData = statesAsLanes[laneIndex(instanceIndex, lanePosition)];
+ sizeLeft -= SnP_laneLengthInBytes;
+ lanePosition++;
+ curData += SnP_laneLengthInBytes;
+ }
+
+ if (sizeLeft > 0) {
+ memcpy( curData, &statesAsLanes[laneIndex(instanceIndex, lanePosition)], sizeLeft);
+ }
+}
+
+void KeccakP1600times4_ExtractLanesAll(const void *states, unsigned char *data, unsigned int laneCount, unsigned int laneOffset)
+{
+ // correcting cast-align errors
+ // old version: UINT64 *curData0 = (UINT64 *)data;
+ UINT64 *curData0 = (void *)data;
+ // old version: UINT64 *curData1 = (UINT64 *)(data+laneOffset*1*SnP_laneLengthInBytes);
+ UINT64 *curData1 = (void *)(data+laneOffset*1*SnP_laneLengthInBytes);
+ // old version: UINT64 *curData2 = (UINT64 *)(data+laneOffset*2*SnP_laneLengthInBytes);
+ UINT64 *curData2 = (void *)(data+laneOffset*2*SnP_laneLengthInBytes);
+ // old version: UINT64 *curData3 = (UINT64 *)(data+laneOffset*3*SnP_laneLengthInBytes);
+ UINT64 *curData3 = (void *)(data+laneOffset*3*SnP_laneLengthInBytes);
+
+ const V256 *stateAsLanes = (const V256 *)states;
+ const UINT64 *stateAsLanes64 = (const UINT64*)states;
+ V256 lanes0, lanes1, lanes2, lanes3, lanesL01, lanesL23, lanesH01, lanesH23;
+ unsigned int i;
+
+ #define Extr( argIndex ) curData0[argIndex] = stateAsLanes64[4*(argIndex)], \
+ curData1[argIndex] = stateAsLanes64[4*(argIndex)+1], \
+ curData2[argIndex] = stateAsLanes64[4*(argIndex)+2], \
+ curData3[argIndex] = stateAsLanes64[4*(argIndex)+3]
+
+ #define Extr4( argIndex ) lanes0 = LOAD256( stateAsLanes[argIndex+0] ), \
+ lanes1 = LOAD256( stateAsLanes[argIndex+1] ), \
+ lanes2 = LOAD256( stateAsLanes[argIndex+2] ), \
+ lanes3 = LOAD256( stateAsLanes[argIndex+3] ), \
+ UNINTLEAVE(), \
+ STORE256u( curData0[argIndex], lanes0 ), \
+ STORE256u( curData1[argIndex], lanes1 ), \
+ STORE256u( curData2[argIndex], lanes2 ), \
+ STORE256u( curData3[argIndex], lanes3 )
+
+ if ( laneCount >= 16 ) {
+ Extr4( 0 );
+ Extr4( 4 );
+ Extr4( 8 );
+ Extr4( 12 );
+ if ( laneCount >= 20 ) {
+ Extr4( 16 );
+ for(i=20; i<laneCount; i++)
+ Extr( i );
+ }
+ else {
+ for(i=16; i<laneCount; i++)
+ Extr( i );
+ }
+ }
+ else {
+ for(i=0; i<laneCount; i++)
+ Extr( i );
+ }
+ #undef Extr
+ #undef Extr4
+}
+
+void KeccakP1600times4_ExtractAndAddBytes(const void *states, unsigned int instanceIndex, const unsigned char *input, unsigned char *output, unsigned int offset, unsigned int length)
+{
+ unsigned int sizeLeft = length;
+ unsigned int lanePosition = offset/SnP_laneLengthInBytes;
+ unsigned int offsetInLane = offset%SnP_laneLengthInBytes;
+ const unsigned char *curInput = input;
+ unsigned char *curOutput = output;
+ const UINT64 *statesAsLanes = (const UINT64 *)states;
+
+ if ((sizeLeft > 0) && (offsetInLane != 0)) {
+ unsigned int bytesInLane = SnP_laneLengthInBytes - offsetInLane;
+ UINT64 lane = statesAsLanes[laneIndex(instanceIndex, lanePosition)] >> (8 * offsetInLane);
+ if (bytesInLane > sizeLeft)
+ bytesInLane = sizeLeft;
+ sizeLeft -= bytesInLane;
+ do {
+ *(curOutput++) = *(curInput++) ^ (unsigned char)lane;
+ lane >>= 8;
+ } while ( --bytesInLane != 0);
+ lanePosition++;
+ }
+
+ while(sizeLeft >= SnP_laneLengthInBytes) {
+ // correcting cast-align and cast-qual errors
+ // old version: *((UINT64*)curOutput) = *((UINT64*)curInput) ^ statesAsLanes[laneIndex(instanceIndex, lanePosition)];
+ *((UINT64*)(void*)curOutput) = *((const UINT64*)(const void*)curInput) ^ statesAsLanes[laneIndex(instanceIndex, lanePosition)];
+ sizeLeft -= SnP_laneLengthInBytes;
+ lanePosition++;
+ curInput += SnP_laneLengthInBytes;
+ curOutput += SnP_laneLengthInBytes;
+ }
+
+ if (sizeLeft != 0) {
+ UINT64 lane = statesAsLanes[laneIndex(instanceIndex, lanePosition)];
+ do {
+ *(curOutput++) = *(curInput++) ^ (unsigned char)lane;
+ lane >>= 8;
+ } while ( --sizeLeft != 0);
+ }
+}
+
+void KeccakP1600times4_ExtractAndAddLanesAll(const void *states, const unsigned char *input, unsigned char *output, unsigned int laneCount, unsigned int laneOffset)
+{
+ // correcting cast-align and cast-qual errors
+ // old version: const UINT64 *curInput0 = (UINT64 *)input;
+ const UINT64 *curInput0 = (const void *)input;
+ // old version: const UINT64 *curInput1 = (UINT64 *)(input+laneOffset*1*SnP_laneLengthInBytes);
+ const UINT64 *curInput1 = (const void *)(input+laneOffset*1*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curInput2 = (UINT64 *)(input+laneOffset*2*SnP_laneLengthInBytes);
+ const UINT64 *curInput2 = (const void *)(input+laneOffset*2*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curInput3 = (UINT64 *)(input+laneOffset*3*SnP_laneLengthInBytes);
+ const UINT64 *curInput3 = (const void *)(input+laneOffset*3*SnP_laneLengthInBytes);
+ // correcting cast-align errors
+ // old version: UINT64 *curOutput0 = (UINT64 *)output;
+ UINT64 *curOutput0 = (void *)output;
+ // old version: UINT64 *curOutput1 = (UINT64 *)(output+laneOffset*1*SnP_laneLengthInBytes);
+ UINT64 *curOutput1 = (void *)(output+laneOffset*1*SnP_laneLengthInBytes);
+ // old version: UINT64 *curOutput2 = (UINT64 *)(output+laneOffset*2*SnP_laneLengthInBytes);
+ UINT64 *curOutput2 = (void *)(output+laneOffset*2*SnP_laneLengthInBytes);
+ // old version: UINT64 *curOutput3 = (UINT64 *)(output+laneOffset*3*SnP_laneLengthInBytes);
+ UINT64 *curOutput3 = (void *)(output+laneOffset*3*SnP_laneLengthInBytes);
+
+ const V256 *stateAsLanes = (const V256 *)states;
+ const UINT64 *stateAsLanes64 = (const UINT64*)states;
+ V256 lanes0, lanes1, lanes2, lanes3, lanesL01, lanesL23, lanesH01, lanesH23;
+ unsigned int i;
+
+ #define ExtrXor( argIndex ) \
+ curOutput0[argIndex] = curInput0[argIndex] ^ stateAsLanes64[4*(argIndex)],\
+ curOutput1[argIndex] = curInput1[argIndex] ^ stateAsLanes64[4*(argIndex)+1],\
+ curOutput2[argIndex] = curInput2[argIndex] ^ stateAsLanes64[4*(argIndex)+2],\
+ curOutput3[argIndex] = curInput3[argIndex] ^ stateAsLanes64[4*(argIndex)+3]
+
+ #define ExtrXor4( argIndex ) \
+ lanes0 = LOAD256( stateAsLanes[argIndex+0] ),\
+ lanes1 = LOAD256( stateAsLanes[argIndex+1] ),\
+ lanes2 = LOAD256( stateAsLanes[argIndex+2] ),\
+ lanes3 = LOAD256( stateAsLanes[argIndex+3] ),\
+ UNINTLEAVE(),\
+ lanesL01 = LOAD256u( curInput0[argIndex]),\
+ lanesH01 = LOAD256u( curInput1[argIndex]),\
+ lanesL23 = LOAD256u( curInput2[argIndex]),\
+ lanesH23 = LOAD256u( curInput3[argIndex]),\
+ XOReq256( lanes0, lanesL01 ),\
+ XOReq256( lanes1, lanesH01 ),\
+ XOReq256( lanes2, lanesL23 ),\
+ XOReq256( lanes3, lanesH23 ),\
+ STORE256u( curOutput0[argIndex], lanes0 ),\
+ STORE256u( curOutput1[argIndex], lanes1 ),\
+ STORE256u( curOutput2[argIndex], lanes2 ),\
+ STORE256u( curOutput3[argIndex], lanes3 )
+
+ if ( laneCount >= 16 ) {
+ ExtrXor4( 0 );
+ ExtrXor4( 4 );
+ ExtrXor4( 8 );
+ ExtrXor4( 12 );
+ if ( laneCount >= 20 ) {
+ ExtrXor4( 16 );
+ for(i=20; i<laneCount; i++)
+ ExtrXor( i );
+ }
+ else {
+ for(i=16; i<laneCount; i++)
+ ExtrXor( i );
+ }
+ }
+ else {
+ for(i=0; i<laneCount; i++)
+ ExtrXor( i );
+ }
+ #undef ExtrXor
+ #undef ExtrXor4
+}
+
+#define declareABCDE \
+ V256 Aba, Abe, Abi, Abo, Abu; \
+ V256 Aga, Age, Agi, Ago, Agu; \
+ V256 Aka, Ake, Aki, Ako, Aku; \
+ V256 Ama, Ame, Ami, Amo, Amu; \
+ V256 Asa, Ase, Asi, Aso, Asu; \
+ V256 Bba, Bbe, Bbi, Bbo, Bbu; \
+ V256 Bga, Bge, Bgi, Bgo, Bgu; \
+ V256 Bka, Bke, Bki, Bko, Bku; \
+ V256 Bma, Bme, Bmi, Bmo, Bmu; \
+ V256 Bsa, Bse, Bsi, Bso, Bsu; \
+ V256 Ca, Ce, Ci, Co, Cu; \
+ V256 Ca1, Ce1, Ci1, Co1, Cu1; \
+ V256 Da, De, Di, Do, Du; \
+ V256 Eba, Ebe, Ebi, Ebo, Ebu; \
+ V256 Ega, Ege, Egi, Ego, Egu; \
+ V256 Eka, Eke, Eki, Eko, Eku; \
+ V256 Ema, Eme, Emi, Emo, Emu; \
+ V256 Esa, Ese, Esi, Eso, Esu; \
+
+#define prepareTheta \
+ Ca = XOR256(Aba, XOR256(Aga, XOR256(Aka, XOR256(Ama, Asa)))); \
+ Ce = XOR256(Abe, XOR256(Age, XOR256(Ake, XOR256(Ame, Ase)))); \
+ Ci = XOR256(Abi, XOR256(Agi, XOR256(Aki, XOR256(Ami, Asi)))); \
+ Co = XOR256(Abo, XOR256(Ago, XOR256(Ako, XOR256(Amo, Aso)))); \
+ Cu = XOR256(Abu, XOR256(Agu, XOR256(Aku, XOR256(Amu, Asu)))); \
+
+/* --- Theta Rho Pi Chi Iota Prepare-theta */
+/* --- 64-bit lanes mapped to 64-bit words */
+#define thetaRhoPiChiIotaPrepareTheta(i, A, E) \
+ ROL64in256(Ce1, Ce, 1); \
+ Da = XOR256(Cu, Ce1); \
+ ROL64in256(Ci1, Ci, 1); \
+ De = XOR256(Ca, Ci1); \
+ ROL64in256(Co1, Co, 1); \
+ Di = XOR256(Ce, Co1); \
+ ROL64in256(Cu1, Cu, 1); \
+ Do = XOR256(Ci, Cu1); \
+ ROL64in256(Ca1, Ca, 1); \
+ Du = XOR256(Co, Ca1); \
+\
+ XOReq256(A##ba, Da); \
+ Bba = A##ba; \
+ XOReq256(A##ge, De); \
+ ROL64in256(Bbe, A##ge, 44); \
+ XOReq256(A##ki, Di); \
+ ROL64in256(Bbi, A##ki, 43); \
+ E##ba = XOR256(Bba, ANDnu256(Bbe, Bbi)); \
+ XOReq256(E##ba, CONST256_64(KeccakF1600RoundConstants[i])); \
+ Ca = E##ba; \
+ XOReq256(A##mo, Do); \
+ ROL64in256(Bbo, A##mo, 21); \
+ E##be = XOR256(Bbe, ANDnu256(Bbi, Bbo)); \
+ Ce = E##be; \
+ XOReq256(A##su, Du); \
+ ROL64in256(Bbu, A##su, 14); \
+ E##bi = XOR256(Bbi, ANDnu256(Bbo, Bbu)); \
+ Ci = E##bi; \
+ E##bo = XOR256(Bbo, ANDnu256(Bbu, Bba)); \
+ Co = E##bo; \
+ E##bu = XOR256(Bbu, ANDnu256(Bba, Bbe)); \
+ Cu = E##bu; \
+\
+ XOReq256(A##bo, Do); \
+ ROL64in256(Bga, A##bo, 28); \
+ XOReq256(A##gu, Du); \
+ ROL64in256(Bge, A##gu, 20); \
+ XOReq256(A##ka, Da); \
+ ROL64in256(Bgi, A##ka, 3); \
+ E##ga = XOR256(Bga, ANDnu256(Bge, Bgi)); \
+ XOReq256(Ca, E##ga); \
+ XOReq256(A##me, De); \
+ ROL64in256(Bgo, A##me, 45); \
+ E##ge = XOR256(Bge, ANDnu256(Bgi, Bgo)); \
+ XOReq256(Ce, E##ge); \
+ XOReq256(A##si, Di); \
+ ROL64in256(Bgu, A##si, 61); \
+ E##gi = XOR256(Bgi, ANDnu256(Bgo, Bgu)); \
+ XOReq256(Ci, E##gi); \
+ E##go = XOR256(Bgo, ANDnu256(Bgu, Bga)); \
+ XOReq256(Co, E##go); \
+ E##gu = XOR256(Bgu, ANDnu256(Bga, Bge)); \
+ XOReq256(Cu, E##gu); \
+\
+ XOReq256(A##be, De); \
+ ROL64in256(Bka, A##be, 1); \
+ XOReq256(A##gi, Di); \
+ ROL64in256(Bke, A##gi, 6); \
+ XOReq256(A##ko, Do); \
+ ROL64in256(Bki, A##ko, 25); \
+ E##ka = XOR256(Bka, ANDnu256(Bke, Bki)); \
+ XOReq256(Ca, E##ka); \
+ XOReq256(A##mu, Du); \
+ ROL64in256_8(Bko, A##mu); \
+ E##ke = XOR256(Bke, ANDnu256(Bki, Bko)); \
+ XOReq256(Ce, E##ke); \
+ XOReq256(A##sa, Da); \
+ ROL64in256(Bku, A##sa, 18); \
+ E##ki = XOR256(Bki, ANDnu256(Bko, Bku)); \
+ XOReq256(Ci, E##ki); \
+ E##ko = XOR256(Bko, ANDnu256(Bku, Bka)); \
+ XOReq256(Co, E##ko); \
+ E##ku = XOR256(Bku, ANDnu256(Bka, Bke)); \
+ XOReq256(Cu, E##ku); \
+\
+ XOReq256(A##bu, Du); \
+ ROL64in256(Bma, A##bu, 27); \
+ XOReq256(A##ga, Da); \
+ ROL64in256(Bme, A##ga, 36); \
+ XOReq256(A##ke, De); \
+ ROL64in256(Bmi, A##ke, 10); \
+ E##ma = XOR256(Bma, ANDnu256(Bme, Bmi)); \
+ XOReq256(Ca, E##ma); \
+ XOReq256(A##mi, Di); \
+ ROL64in256(Bmo, A##mi, 15); \
+ E##me = XOR256(Bme, ANDnu256(Bmi, Bmo)); \
+ XOReq256(Ce, E##me); \
+ XOReq256(A##so, Do); \
+ ROL64in256_56(Bmu, A##so); \
+ E##mi = XOR256(Bmi, ANDnu256(Bmo, Bmu)); \
+ XOReq256(Ci, E##mi); \
+ E##mo = XOR256(Bmo, ANDnu256(Bmu, Bma)); \
+ XOReq256(Co, E##mo); \
+ E##mu = XOR256(Bmu, ANDnu256(Bma, Bme)); \
+ XOReq256(Cu, E##mu); \
+\
+ XOReq256(A##bi, Di); \
+ ROL64in256(Bsa, A##bi, 62); \
+ XOReq256(A##go, Do); \
+ ROL64in256(Bse, A##go, 55); \
+ XOReq256(A##ku, Du); \
+ ROL64in256(Bsi, A##ku, 39); \
+ E##sa = XOR256(Bsa, ANDnu256(Bse, Bsi)); \
+ XOReq256(Ca, E##sa); \
+ XOReq256(A##ma, Da); \
+ ROL64in256(Bso, A##ma, 41); \
+ E##se = XOR256(Bse, ANDnu256(Bsi, Bso)); \
+ XOReq256(Ce, E##se); \
+ XOReq256(A##se, De); \
+ ROL64in256(Bsu, A##se, 2); \
+ E##si = XOR256(Bsi, ANDnu256(Bso, Bsu)); \
+ XOReq256(Ci, E##si); \
+ E##so = XOR256(Bso, ANDnu256(Bsu, Bsa)); \
+ XOReq256(Co, E##so); \
+ E##su = XOR256(Bsu, ANDnu256(Bsa, Bse)); \
+ XOReq256(Cu, E##su); \
+\
+
+/* --- Theta Rho Pi Chi Iota */
+/* --- 64-bit lanes mapped to 64-bit words */
+#define thetaRhoPiChiIota(i, A, E) \
+ ROL64in256(Ce1, Ce, 1); \
+ Da = XOR256(Cu, Ce1); \
+ ROL64in256(Ci1, Ci, 1); \
+ De = XOR256(Ca, Ci1); \
+ ROL64in256(Co1, Co, 1); \
+ Di = XOR256(Ce, Co1); \
+ ROL64in256(Cu1, Cu, 1); \
+ Do = XOR256(Ci, Cu1); \
+ ROL64in256(Ca1, Ca, 1); \
+ Du = XOR256(Co, Ca1); \
+\
+ XOReq256(A##ba, Da); \
+ Bba = A##ba; \
+ XOReq256(A##ge, De); \
+ ROL64in256(Bbe, A##ge, 44); \
+ XOReq256(A##ki, Di); \
+ ROL64in256(Bbi, A##ki, 43); \
+ E##ba = XOR256(Bba, ANDnu256(Bbe, Bbi)); \
+ XOReq256(E##ba, CONST256_64(KeccakF1600RoundConstants[i])); \
+ XOReq256(A##mo, Do); \
+ ROL64in256(Bbo, A##mo, 21); \
+ E##be = XOR256(Bbe, ANDnu256(Bbi, Bbo)); \
+ XOReq256(A##su, Du); \
+ ROL64in256(Bbu, A##su, 14); \
+ E##bi = XOR256(Bbi, ANDnu256(Bbo, Bbu)); \
+ E##bo = XOR256(Bbo, ANDnu256(Bbu, Bba)); \
+ E##bu = XOR256(Bbu, ANDnu256(Bba, Bbe)); \
+\
+ XOReq256(A##bo, Do); \
+ ROL64in256(Bga, A##bo, 28); \
+ XOReq256(A##gu, Du); \
+ ROL64in256(Bge, A##gu, 20); \
+ XOReq256(A##ka, Da); \
+ ROL64in256(Bgi, A##ka, 3); \
+ E##ga = XOR256(Bga, ANDnu256(Bge, Bgi)); \
+ XOReq256(A##me, De); \
+ ROL64in256(Bgo, A##me, 45); \
+ E##ge = XOR256(Bge, ANDnu256(Bgi, Bgo)); \
+ XOReq256(A##si, Di); \
+ ROL64in256(Bgu, A##si, 61); \
+ E##gi = XOR256(Bgi, ANDnu256(Bgo, Bgu)); \
+ E##go = XOR256(Bgo, ANDnu256(Bgu, Bga)); \
+ E##gu = XOR256(Bgu, ANDnu256(Bga, Bge)); \
+\
+ XOReq256(A##be, De); \
+ ROL64in256(Bka, A##be, 1); \
+ XOReq256(A##gi, Di); \
+ ROL64in256(Bke, A##gi, 6); \
+ XOReq256(A##ko, Do); \
+ ROL64in256(Bki, A##ko, 25); \
+ E##ka = XOR256(Bka, ANDnu256(Bke, Bki)); \
+ XOReq256(A##mu, Du); \
+ ROL64in256_8(Bko, A##mu); \
+ E##ke = XOR256(Bke, ANDnu256(Bki, Bko)); \
+ XOReq256(A##sa, Da); \
+ ROL64in256(Bku, A##sa, 18); \
+ E##ki = XOR256(Bki, ANDnu256(Bko, Bku)); \
+ E##ko = XOR256(Bko, ANDnu256(Bku, Bka)); \
+ E##ku = XOR256(Bku, ANDnu256(Bka, Bke)); \
+\
+ XOReq256(A##bu, Du); \
+ ROL64in256(Bma, A##bu, 27); \
+ XOReq256(A##ga, Da); \
+ ROL64in256(Bme, A##ga, 36); \
+ XOReq256(A##ke, De); \
+ ROL64in256(Bmi, A##ke, 10); \
+ E##ma = XOR256(Bma, ANDnu256(Bme, Bmi)); \
+ XOReq256(A##mi, Di); \
+ ROL64in256(Bmo, A##mi, 15); \
+ E##me = XOR256(Bme, ANDnu256(Bmi, Bmo)); \
+ XOReq256(A##so, Do); \
+ ROL64in256_56(Bmu, A##so); \
+ E##mi = XOR256(Bmi, ANDnu256(Bmo, Bmu)); \
+ E##mo = XOR256(Bmo, ANDnu256(Bmu, Bma)); \
+ E##mu = XOR256(Bmu, ANDnu256(Bma, Bme)); \
+\
+ XOReq256(A##bi, Di); \
+ ROL64in256(Bsa, A##bi, 62); \
+ XOReq256(A##go, Do); \
+ ROL64in256(Bse, A##go, 55); \
+ XOReq256(A##ku, Du); \
+ ROL64in256(Bsi, A##ku, 39); \
+ E##sa = XOR256(Bsa, ANDnu256(Bse, Bsi)); \
+ XOReq256(A##ma, Da); \
+ ROL64in256(Bso, A##ma, 41); \
+ E##se = XOR256(Bse, ANDnu256(Bsi, Bso)); \
+ XOReq256(A##se, De); \
+ ROL64in256(Bsu, A##se, 2); \
+ E##si = XOR256(Bsi, ANDnu256(Bso, Bsu)); \
+ E##so = XOR256(Bso, ANDnu256(Bsu, Bsa)); \
+ E##su = XOR256(Bsu, ANDnu256(Bsa, Bse)); \
+\
+
+static ALIGN(KeccakP1600times4_statesAlignment) const UINT64 KeccakF1600RoundConstants[24] = {
+ 0x0000000000000001ULL,
+ 0x0000000000008082ULL,
+ 0x800000000000808aULL,
+ 0x8000000080008000ULL,
+ 0x000000000000808bULL,
+ 0x0000000080000001ULL,
+ 0x8000000080008081ULL,
+ 0x8000000000008009ULL,
+ 0x000000000000008aULL,
+ 0x0000000000000088ULL,
+ 0x0000000080008009ULL,
+ 0x000000008000000aULL,
+ 0x000000008000808bULL,
+ 0x800000000000008bULL,
+ 0x8000000000008089ULL,
+ 0x8000000000008003ULL,
+ 0x8000000000008002ULL,
+ 0x8000000000000080ULL,
+ 0x000000000000800aULL,
+ 0x800000008000000aULL,
+ 0x8000000080008081ULL,
+ 0x8000000000008080ULL,
+ 0x0000000080000001ULL,
+ 0x8000000080008008ULL};
+
+#define copyFromState(X, state) \
+ X##ba = LOAD256(state[ 0]); \
+ X##be = LOAD256(state[ 1]); \
+ X##bi = LOAD256(state[ 2]); \
+ X##bo = LOAD256(state[ 3]); \
+ X##bu = LOAD256(state[ 4]); \
+ X##ga = LOAD256(state[ 5]); \
+ X##ge = LOAD256(state[ 6]); \
+ X##gi = LOAD256(state[ 7]); \
+ X##go = LOAD256(state[ 8]); \
+ X##gu = LOAD256(state[ 9]); \
+ X##ka = LOAD256(state[10]); \
+ X##ke = LOAD256(state[11]); \
+ X##ki = LOAD256(state[12]); \
+ X##ko = LOAD256(state[13]); \
+ X##ku = LOAD256(state[14]); \
+ X##ma = LOAD256(state[15]); \
+ X##me = LOAD256(state[16]); \
+ X##mi = LOAD256(state[17]); \
+ X##mo = LOAD256(state[18]); \
+ X##mu = LOAD256(state[19]); \
+ X##sa = LOAD256(state[20]); \
+ X##se = LOAD256(state[21]); \
+ X##si = LOAD256(state[22]); \
+ X##so = LOAD256(state[23]); \
+ X##su = LOAD256(state[24]); \
+
+#define copyToState(state, X) \
+ STORE256(state[ 0], X##ba); \
+ STORE256(state[ 1], X##be); \
+ STORE256(state[ 2], X##bi); \
+ STORE256(state[ 3], X##bo); \
+ STORE256(state[ 4], X##bu); \
+ STORE256(state[ 5], X##ga); \
+ STORE256(state[ 6], X##ge); \
+ STORE256(state[ 7], X##gi); \
+ STORE256(state[ 8], X##go); \
+ STORE256(state[ 9], X##gu); \
+ STORE256(state[10], X##ka); \
+ STORE256(state[11], X##ke); \
+ STORE256(state[12], X##ki); \
+ STORE256(state[13], X##ko); \
+ STORE256(state[14], X##ku); \
+ STORE256(state[15], X##ma); \
+ STORE256(state[16], X##me); \
+ STORE256(state[17], X##mi); \
+ STORE256(state[18], X##mo); \
+ STORE256(state[19], X##mu); \
+ STORE256(state[20], X##sa); \
+ STORE256(state[21], X##se); \
+ STORE256(state[22], X##si); \
+ STORE256(state[23], X##so); \
+ STORE256(state[24], X##su); \
+
+#define copyStateVariables(X, Y) \
+ X##ba = Y##ba; \
+ X##be = Y##be; \
+ X##bi = Y##bi; \
+ X##bo = Y##bo; \
+ X##bu = Y##bu; \
+ X##ga = Y##ga; \
+ X##ge = Y##ge; \
+ X##gi = Y##gi; \
+ X##go = Y##go; \
+ X##gu = Y##gu; \
+ X##ka = Y##ka; \
+ X##ke = Y##ke; \
+ X##ki = Y##ki; \
+ X##ko = Y##ko; \
+ X##ku = Y##ku; \
+ X##ma = Y##ma; \
+ X##me = Y##me; \
+ X##mi = Y##mi; \
+ X##mo = Y##mo; \
+ X##mu = Y##mu; \
+ X##sa = Y##sa; \
+ X##se = Y##se; \
+ X##si = Y##si; \
+ X##so = Y##so; \
+ X##su = Y##su; \
+
+ #ifdef KeccakP1600times4_fullUnrolling
+#define FullUnrolling
+#else
+#define Unrolling KeccakP1600times4_unrolling
+#endif
+// The macro file is combined with source file directly
+/*****#include "KeccakP-1600-unrolling_avx2.macros"*****/
+/*******************************************************/
+/*
+Implementation by the Keccak, Keyak and Ketje Teams, namely, Guido Bertoni,
+Joan Daemen, Michaël Peeters, Gilles Van Assche and Ronny Van Keer, hereby
+denoted as "the implementer".
+
+For more information, feedback or questions, please refer to our websites:
+http://keccak.noekeon.org/
+http://keyak.noekeon.org/
+http://ketje.noekeon.org/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#if (defined(FullUnrolling))
+#define rounds24 \
+ prepareTheta \
+ thetaRhoPiChiIotaPrepareTheta( 0, A, E) \
+ thetaRhoPiChiIotaPrepareTheta( 1, E, A) \
+ thetaRhoPiChiIotaPrepareTheta( 2, A, E) \
+ thetaRhoPiChiIotaPrepareTheta( 3, E, A) \
+ thetaRhoPiChiIotaPrepareTheta( 4, A, E) \
+ thetaRhoPiChiIotaPrepareTheta( 5, E, A) \
+ thetaRhoPiChiIotaPrepareTheta( 6, A, E) \
+ thetaRhoPiChiIotaPrepareTheta( 7, E, A) \
+ thetaRhoPiChiIotaPrepareTheta( 8, A, E) \
+ thetaRhoPiChiIotaPrepareTheta( 9, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(10, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(11, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(12, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(13, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(14, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(15, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(16, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(17, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(18, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(19, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(20, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(21, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(22, A, E) \
+ thetaRhoPiChiIota(23, E, A) \
+
+#define rounds12 \
+ prepareTheta \
+ thetaRhoPiChiIotaPrepareTheta(12, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(13, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(14, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(15, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(16, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(17, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(18, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(19, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(20, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(21, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(22, A, E) \
+ thetaRhoPiChiIota(23, E, A) \
+
+#elif (Unrolling == 12)
+#define rounds24 \
+ prepareTheta \
+ for(i=0; i<24; i+=12) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 1, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 2, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 3, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 4, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 5, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 6, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 7, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 8, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+ 9, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+10, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+11, E, A) \
+ } \
+
+#define rounds12 \
+ prepareTheta \
+ thetaRhoPiChiIotaPrepareTheta(12, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(13, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(14, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(15, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(16, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(17, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(18, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(19, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(20, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(21, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(22, A, E) \
+ thetaRhoPiChiIota(23, E, A) \
+
+#elif (Unrolling == 6)
+#define rounds24 \
+ prepareTheta \
+ for(i=0; i<24; i+=6) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+3, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+4, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+5, E, A) \
+ } \
+
+#define rounds12 \
+ prepareTheta \
+ for(i=12; i<24; i+=6) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+3, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+4, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+5, E, A) \
+ } \
+
+#elif (Unrolling == 4)
+#define rounds24 \
+ prepareTheta \
+ for(i=0; i<24; i+=4) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+3, E, A) \
+ } \
+
+#define rounds12 \
+ prepareTheta \
+ for(i=12; i<24; i+=4) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+3, E, A) \
+ } \
+
+#elif (Unrolling == 3)
+#define rounds24 \
+ prepareTheta \
+ for(i=0; i<24; i+=3) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
+ copyStateVariables(A, E) \
+ } \
+
+#define rounds12 \
+ prepareTheta \
+ for(i=12; i<24; i+=3) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
+ copyStateVariables(A, E) \
+ } \
+
+#elif (Unrolling == 2)
+#define rounds24 \
+ prepareTheta \
+ for(i=0; i<24; i+=2) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ } \
+
+#define rounds12 \
+ prepareTheta \
+ for(i=12; i<24; i+=2) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ } \
+
+#elif (Unrolling == 1)
+#define rounds24 \
+ prepareTheta \
+ for(i=0; i<24; i++) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ copyStateVariables(A, E) \
+ } \
+
+#define rounds12 \
+ prepareTheta \
+ for(i=12; i<24; i++) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ copyStateVariables(A, E) \
+ } \
+
+#else
+#error "Unrolling is not correctly specified!"
+#endif
+
+#define roundsN(__nrounds) \
+ prepareTheta \
+ i = 24 - (__nrounds); \
+ if ((i&1) != 0) { \
+ thetaRhoPiChiIotaPrepareTheta(i, A, E) \
+ copyStateVariables(A, E) \
+ ++i; \
+ } \
+ for( /* empty */; i<24; i+=2) { \
+ thetaRhoPiChiIotaPrepareTheta(i , A, E) \
+ thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
+ }
+
+/*******************************************************/
+
+void KeccakP1600times4_PermuteAll_24rounds(void *states)
+{
+ V256 *statesAsLanes = (V256 *)states;
+ declareABCDE
+ #ifndef KeccakP1600times4_fullUnrolling
+ unsigned int i;
+ #endif
+
+ copyFromState(A, statesAsLanes)
+ rounds24
+ copyToState(statesAsLanes, A)
+}
+
+void KeccakP1600times4_PermuteAll_12rounds(void *states)
+{
+ V256 *statesAsLanes = (V256 *)states;
+ declareABCDE
+ #ifndef KeccakP1600times4_fullUnrolling
+ unsigned int i;
+ #endif
+
+ copyFromState(A, statesAsLanes)
+ rounds12
+ copyToState(statesAsLanes, A)
+}
+
+size_t KeccakF1600times4_FastLoop_Absorb(void *states, unsigned int laneCount, unsigned int laneOffsetParallel, unsigned int laneOffsetSerial, const unsigned char *data, size_t dataByteLen)
+{
+ if (laneCount == 21) {
+#if 0
+ const unsigned char *dataStart = data;
+ const UINT64 *curData0 = (const UINT64 *)data;
+ const UINT64 *curData1 = (const UINT64 *)(data+laneOffsetParallel*1*SnP_laneLengthInBytes);
+ const UINT64 *curData2 = (const UINT64 *)(data+laneOffsetParallel*2*SnP_laneLengthInBytes);
+ const UINT64 *curData3 = (const UINT64 *)(data+laneOffsetParallel*3*SnP_laneLengthInBytes);
+
+ while(dataByteLen >= (laneOffsetParallel*3 + laneCount)*8) {
+ V256 *stateAsLanes = (V256 *)states;
+ V256 lanes0, lanes1, lanes2, lanes3, lanesL01, lanesL23, lanesH01, lanesH23;
+ #define Xor_In( argIndex ) \
+ XOReq256(stateAsLanes[argIndex], LOAD4_64(curData3[argIndex], curData2[argIndex], curData1[argIndex], curData0[argIndex]))
+ #define Xor_In4( argIndex ) \
+ lanes0 = LOAD256u( curData0[argIndex]),\
+ lanes1 = LOAD256u( curData1[argIndex]),\
+ lanes2 = LOAD256u( curData2[argIndex]),\
+ lanes3 = LOAD256u( curData3[argIndex]),\
+ INTLEAVE(),\
+ XOReq256( stateAsLanes[argIndex+0], lanes0 ),\
+ XOReq256( stateAsLanes[argIndex+1], lanes1 ),\
+ XOReq256( stateAsLanes[argIndex+2], lanes2 ),\
+ XOReq256( stateAsLanes[argIndex+3], lanes3 )
+ Xor_In4( 0 );
+ Xor_In4( 4 );
+ Xor_In4( 8 );
+ Xor_In4( 12 );
+ Xor_In4( 16 );
+ Xor_In( 20 );
+ #undef Xor_In
+ #undef Xor_In4
+ KeccakP1600times4_PermuteAll_24rounds(states);
+ curData0 += laneOffsetSerial;
+ curData1 += laneOffsetSerial;
+ curData2 += laneOffsetSerial;
+ curData3 += laneOffsetSerial;
+ dataByteLen -= laneOffsetSerial*8;
+ }
+ return (const unsigned char *)curData0 - dataStart;
+#else
+// unsigned int i;
+ const unsigned char *dataStart = data;
+ // correcting cast-align errors
+ // old version: const UINT64 *curData0 = (const UINT64 *)data;
+ const UINT64 *curData0 = (const void *)data;
+ // old version: const UINT64 *curData1 = (const UINT64 *)(data+laneOffsetParallel*1*SnP_laneLengthInBytes);
+ const UINT64 *curData1 = (const void *)(data+laneOffsetParallel*1*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curData2 = (const UINT64 *)(data+laneOffsetParallel*2*SnP_laneLengthInBytes);
+ const UINT64 *curData2 = (const void *)(data+laneOffsetParallel*2*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curData3 = (const UINT64 *)(data+laneOffsetParallel*3*SnP_laneLengthInBytes);
+ const UINT64 *curData3 = (const void *)(data+laneOffsetParallel*3*SnP_laneLengthInBytes);
+ V256 *statesAsLanes = (V256 *)states;
+ declareABCDE
+
+ copyFromState(A, statesAsLanes)
+ while(dataByteLen >= (laneOffsetParallel*3 + laneCount)*8) {
+ #define XOR_In( Xxx, argIndex ) \
+ XOReq256(Xxx, LOAD4_64(curData3[argIndex], curData2[argIndex], curData1[argIndex], curData0[argIndex]))
+ XOR_In( Aba, 0 );
+ XOR_In( Abe, 1 );
+ XOR_In( Abi, 2 );
+ XOR_In( Abo, 3 );
+ XOR_In( Abu, 4 );
+ XOR_In( Aga, 5 );
+ XOR_In( Age, 6 );
+ XOR_In( Agi, 7 );
+ XOR_In( Ago, 8 );
+ XOR_In( Agu, 9 );
+ XOR_In( Aka, 10 );
+ XOR_In( Ake, 11 );
+ XOR_In( Aki, 12 );
+ XOR_In( Ako, 13 );
+ XOR_In( Aku, 14 );
+ XOR_In( Ama, 15 );
+ XOR_In( Ame, 16 );
+ XOR_In( Ami, 17 );
+ XOR_In( Amo, 18 );
+ XOR_In( Amu, 19 );
+ XOR_In( Asa, 20 );
+ #undef XOR_In
+ rounds24
+ curData0 += laneOffsetSerial;
+ curData1 += laneOffsetSerial;
+ curData2 += laneOffsetSerial;
+ curData3 += laneOffsetSerial;
+ dataByteLen -= laneOffsetSerial*8;
+ }
+ copyToState(statesAsLanes, A)
+ return (const unsigned char *)curData0 - dataStart;
+#endif
+ }
+ else {
+// unsigned int i;
+ const unsigned char *dataStart = data;
+
+ while(dataByteLen >= (laneOffsetParallel*3 + laneCount)*8) {
+ KeccakP1600times4_AddLanesAll(states, data, laneCount, laneOffsetParallel);
+ KeccakP1600times4_PermuteAll_24rounds(states);
+ data += laneOffsetSerial*8;
+ dataByteLen -= laneOffsetSerial*8;
+ }
+ return data - dataStart;
+ }
+}
+
+size_t KeccakP1600times4_12rounds_FastLoop_Absorb(void *states, unsigned int laneCount, unsigned int laneOffsetParallel, unsigned int laneOffsetSerial, const unsigned char *data, size_t dataByteLen)
+{
+ if (laneCount == 21) {
+#if 0
+ const unsigned char *dataStart = data;
+ const UINT64 *curData0 = (const UINT64 *)data;
+ const UINT64 *curData1 = (const UINT64 *)(data+laneOffsetParallel*1*SnP_laneLengthInBytes);
+ const UINT64 *curData2 = (const UINT64 *)(data+laneOffsetParallel*2*SnP_laneLengthInBytes);
+ const UINT64 *curData3 = (const UINT64 *)(data+laneOffsetParallel*3*SnP_laneLengthInBytes);
+
+ while(dataByteLen >= (laneOffsetParallel*3 + laneCount)*8) {
+ V256 *stateAsLanes = states;
+ V256 lanes0, lanes1, lanes2, lanes3, lanesL01, lanesL23, lanesH01, lanesH23;
+ #define Xor_In( argIndex ) \
+ XOReq256(stateAsLanes[argIndex], LOAD4_64(curData3[argIndex], curData2[argIndex], curData1[argIndex], curData0[argIndex]))
+ #define Xor_In4( argIndex ) \
+ lanes0 = LOAD256u( curData0[argIndex]),\
+ lanes1 = LOAD256u( curData1[argIndex]),\
+ lanes2 = LOAD256u( curData2[argIndex]),\
+ lanes3 = LOAD256u( curData3[argIndex]),\
+ INTLEAVE(),\
+ XOReq256( stateAsLanes[argIndex+0], lanes0 ),\
+ XOReq256( stateAsLanes[argIndex+1], lanes1 ),\
+ XOReq256( stateAsLanes[argIndex+2], lanes2 ),\
+ XOReq256( stateAsLanes[argIndex+3], lanes3 )
+ Xor_In4( 0 );
+ Xor_In4( 4 );
+ Xor_In4( 8 );
+ Xor_In4( 12 );
+ Xor_In4( 16 );
+ Xor_In( 20 );
+ #undef Xor_In
+ #undef Xor_In4
+ KeccakP1600times4_PermuteAll_12rounds(states);
+ curData0 += laneOffsetSerial;
+ curData1 += laneOffsetSerial;
+ curData2 += laneOffsetSerial;
+ curData3 += laneOffsetSerial;
+ dataByteLen -= laneOffsetSerial*8;
+ }
+ return (const unsigned char *)curData0 - dataStart;
+#else
+// unsigned int i;
+ const unsigned char *dataStart = data;
+ // correcting cast-align errors
+ // old version: const UINT64 *curData0 = (const UINT64 *)data;
+ const UINT64 *curData0 = (const void *)data;
+ // old version: const UINT64 *curData1 = (const UINT64 *)(data+laneOffsetParallel*1*SnP_laneLengthInBytes);
+ const UINT64 *curData1 = (const void *)(data+laneOffsetParallel*1*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curData2 = (const UINT64 *)(data+laneOffsetParallel*2*SnP_laneLengthInBytes);
+ const UINT64 *curData2 = (const void *)(data+laneOffsetParallel*2*SnP_laneLengthInBytes);
+ // old version: const UINT64 *curData3 = (const UINT64 *)(data+laneOffsetParallel*3*SnP_laneLengthInBytes);
+ const UINT64 *curData3 = (const void *)(data+laneOffsetParallel*3*SnP_laneLengthInBytes);
+ V256 *statesAsLanes = states;
+ declareABCDE
+
+ copyFromState(A, statesAsLanes)
+ while(dataByteLen >= (laneOffsetParallel*3 + laneCount)*8) {
+ #define XOR_In( Xxx, argIndex ) \
+ XOReq256(Xxx, LOAD4_64(curData3[argIndex], curData2[argIndex], curData1[argIndex], curData0[argIndex]))
+ XOR_In( Aba, 0 );
+ XOR_In( Abe, 1 );
+ XOR_In( Abi, 2 );
+ XOR_In( Abo, 3 );
+ XOR_In( Abu, 4 );
+ XOR_In( Aga, 5 );
+ XOR_In( Age, 6 );
+ XOR_In( Agi, 7 );
+ XOR_In( Ago, 8 );
+ XOR_In( Agu, 9 );
+ XOR_In( Aka, 10 );
+ XOR_In( Ake, 11 );
+ XOR_In( Aki, 12 );
+ XOR_In( Ako, 13 );
+ XOR_In( Aku, 14 );
+ XOR_In( Ama, 15 );
+ XOR_In( Ame, 16 );
+ XOR_In( Ami, 17 );
+ XOR_In( Amo, 18 );
+ XOR_In( Amu, 19 );
+ XOR_In( Asa, 20 );
+ #undef XOR_In
+ rounds12
+ curData0 += laneOffsetSerial;
+ curData1 += laneOffsetSerial;
+ curData2 += laneOffsetSerial;
+ curData3 += laneOffsetSerial;
+ dataByteLen -= laneOffsetSerial*8;
+ }
+ copyToState(statesAsLanes, A)
+ return (const unsigned char *)curData0 - dataStart;
+#endif
+ }
+ else {
+// unsigned int i;
+ const unsigned char *dataStart = data;
+
+ while(dataByteLen >= (laneOffsetParallel*3 + laneCount)*8) {
+ KeccakP1600times4_AddLanesAll(states, data, laneCount, laneOffsetParallel);
+ KeccakP1600times4_PermuteAll_12rounds(states);
+ data += laneOffsetSerial*8;
+ dataByteLen -= laneOffsetSerial*8;
+ }
+ return data - dataStart;
+ }
+}
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SnP_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SnP_avx2.h
new file mode 100644
index 0000000000..2640191779
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-1600-times4-SnP_avx2.h
@@ -0,0 +1,63 @@
+/*
+Implementation by the Keccak, Keyak and Ketje Teams, namely, Guido Bertoni,
+Joan Daemen, Michaël Peeters, Gilles Van Assche and Ronny Van Keer, hereby
+denoted as "the implementer".
+
+For more information, feedback or questions, please refer to our websites:
+http://keccak.noekeon.org/
+http://keyak.noekeon.org/
+http://ketje.noekeon.org/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#pragma once
+
+/** For the documentation, see PlSnP-documentation.h.
+ */
+
+#include "KeccakP-SIMD256-config_avx2.h"
+#include "kyber512r3_params.h"
+#include "kyber512r3_fips202x4_avx2.h"
+
+#define KeccakP1600times4_implementation "256-bit SIMD implementation (" KeccakP1600times4_implementation_config ")"
+#define KeccakP1600times4_statesSizeInBytes 800
+#define KeccakP1600times4_statesAlignment 32
+#define KeccakF1600times4_FastLoop_supported
+#define KeccakP1600times4_12rounds_FastLoop_supported
+
+#include <stddef.h>
+
+#define KeccakP1600times4_StaticInitialize()
+#define KeccakP1600times4_InitializeAll S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_InitializeAll)
+void KeccakP1600times4_InitializeAll(void *states);
+#define KeccakP1600times4_AddByte(states, instanceIndex, byte, offset) \
+ ((unsigned char*)(states))[(instanceIndex)*8 + ((offset)/8)*4*8 + (offset)%8] ^= (byte)
+#define KeccakP1600times4_AddBytes S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_AddBytes)
+void KeccakP1600times4_AddBytes(void *states, unsigned int instanceIndex, const unsigned char *data, unsigned int offset, unsigned int length);
+#define KeccakP1600times4_AddLanesAll S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_AddLanesAll)
+void KeccakP1600times4_AddLanesAll(void *states, const unsigned char *data, unsigned int laneCount, unsigned int laneOffset);
+#define KeccakP1600times4_OverwriteBytes S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_OverwriteBytes)
+void KeccakP1600times4_OverwriteBytes(void *states, unsigned int instanceIndex, const unsigned char *data, unsigned int offset, unsigned int length);
+#define KeccakP1600times4_OverwriteLanesAll S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_OverwriteLanesAll)
+void KeccakP1600times4_OverwriteLanesAll(void *states, const unsigned char *data, unsigned int laneCount, unsigned int laneOffset);
+#define KeccakP1600times4_OverwriteWithZeroes S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_OverwriteWithZeroes)
+void KeccakP1600times4_OverwriteWithZeroes(void *states, unsigned int instanceIndex, unsigned int byteCount);
+#define KeccakP1600times4_PermuteAll_12rounds S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_PermuteAll_12rounds)
+void KeccakP1600times4_PermuteAll_12rounds(void *states);
+#define KeccakP1600times4_PermuteAll_24rounds S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_PermuteAll_24rounds)
+void KeccakP1600times4_PermuteAll_24rounds(void *states);
+#define KeccakP1600times4_ExtractBytes S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_ExtractBytes)
+void KeccakP1600times4_ExtractBytes(const void *states, unsigned int instanceIndex, unsigned char *data, unsigned int offset, unsigned int length);
+#define KeccakP1600times4_ExtractLanesAll S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_ExtractLanesAll)
+void KeccakP1600times4_ExtractLanesAll(const void *states, unsigned char *data, unsigned int laneCount, unsigned int laneOffset);
+#define KeccakP1600times4_ExtractAndAddBytes S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_ExtractAndAddBytes)
+void KeccakP1600times4_ExtractAndAddBytes(const void *states, unsigned int instanceIndex, const unsigned char *input, unsigned char *output, unsigned int offset, unsigned int length);
+#define KeccakP1600times4_ExtractAndAddLanesAll S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_ExtractAndAddLanesAll)
+void KeccakP1600times4_ExtractAndAddLanesAll(const void *states, const unsigned char *input, unsigned char *output, unsigned int laneCount, unsigned int laneOffset);
+#define KeccakF1600times4_FastLoop_Absorb S2N_KYBER_512_R3_NAMESPACE(KeccakF1600times4_FastLoop_Absorb)
+size_t KeccakF1600times4_FastLoop_Absorb(void *states, unsigned int laneCount, unsigned int laneOffsetParallel, unsigned int laneOffsetSerial, const unsigned char *data, size_t dataByteLen);
+#define KeccakP1600times4_12rounds_FastLoop_Absorb S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_12rounds_FastLoop_Absorb)
+size_t KeccakP1600times4_12rounds_FastLoop_Absorb(void *states, unsigned int laneCount, unsigned int laneOffsetParallel, unsigned int laneOffsetSerial, const unsigned char *data, size_t dataByteLen);
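+
+/* Editorial note: a minimal usage sketch of this 4-way SnP API (illustration only,
+ * not part of the upstream sources). `block` is an assumed 32-byte caller buffer
+ * absorbed into all four instances before one permutation:
+ *
+ *   ALIGN(KeccakP1600times4_statesAlignment)
+ *   unsigned char states[KeccakP1600times4_statesSizeInBytes];
+ *   unsigned char out[32];
+ *
+ *   KeccakP1600times4_InitializeAll(states);
+ *   for (unsigned int inst = 0; inst < 4; inst++)
+ *       KeccakP1600times4_AddBytes(states, inst, block, 0, 32);
+ *   KeccakP1600times4_PermuteAll_24rounds(states);
+ *   KeccakP1600times4_ExtractBytes(states, 0, out, 0, sizeof(out));
+ *
+ * Padding and rate handling remain the caller's responsibility; the real consumers
+ * in this tree are the SHAKE128x4/SHAKE256x4 wrappers in kyber512r3_fips202x4_avx2.c.
+ */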
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-SIMD256-config_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-SIMD256-config_avx2.h
new file mode 100644
index 0000000000..1c65fe29b4
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-SIMD256-config_avx2.h
@@ -0,0 +1,3 @@
+#define KeccakP1600times4_implementation_config "AVX2, all rounds unrolled"
+#define KeccakP1600times4_fullUnrolling
+#define KeccakP1600times4_useAVX2
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-align_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-align_avx2.h
new file mode 100644
index 0000000000..be08e84af2
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-align_avx2.h
@@ -0,0 +1,31 @@
+/*
+Implementation by the Keccak, Keyak and Ketje Teams, namely, Guido Bertoni,
+Joan Daemen, Michaël Peeters, Gilles Van Assche and Ronny Van Keer, hereby
+denoted as "the implementer".
+
+For more information, feedback or questions, please refer to our websites:
+http://keccak.noekeon.org/
+http://keyak.noekeon.org/
+http://ketje.noekeon.org/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#pragma once
+
+/* on Mac OS-X and possibly others, ALIGN(x) is defined in param.h, and -Werror chokes on the redef. */
+#ifdef ALIGN
+#undef ALIGN
+#endif
+
+#if defined(__GNUC__)
+#define ALIGN(x) __attribute__ ((aligned(x)))
+#elif defined(_MSC_VER)
+#define ALIGN(x) __declspec(align(x))
+#elif defined(__ARMCC_VERSION)
+#define ALIGN(x) __align(x)
+#else
+#define ALIGN(x)
+#endif
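+
+/* Editorial note (illustration only): ALIGN() is placed before the declarator it
+ * qualifies, e.g.
+ *
+ *   ALIGN(32) static unsigned char lanes[800];   // 32-byte aligned storage
+ *
+ * On compilers where none of the branches above match, ALIGN(x) expands to nothing,
+ * so code relying on aligned loads must fall back to unaligned accesses there.
+ */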
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h
new file mode 100644
index 0000000000..8e8b73cf2a
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/KeccakP-brg_endian_avx2.h
@@ -0,0 +1,139 @@
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 1998-2008, Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The redistribution and use of this software (with or without changes)
+ is allowed without the payment of fees or royalties provided that:
+
+ 1. source code distributions include the above copyright notice, this
+ list of conditions and the following disclaimer;
+
+ 2. binary distributions include the above copyright notice, this list
+ of conditions and the following disclaimer in their documentation;
+
+ 3. the name of the copyright holder is not used to endorse products
+ built using this software without specific written permission.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 20/12/2007
+ Changes for ARM 9/9/2010
+*/
+
+#pragma once
+
+#define IS_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */
+#define IS_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
+
+#if 0
+/* Include files where endian defines and byteswap functions may reside */
+#if defined( __sun )
+# include <sys/isa_defs.h>
+#elif defined( __FreeBSD__ ) || defined( __OpenBSD__ ) || defined( __NetBSD__ )
+# include <sys/endian.h>
+#elif defined( BSD ) && ( BSD >= 199103 ) || defined( __APPLE__ ) || \
+ defined( __CYGWIN32__ ) || defined( __DJGPP__ ) || defined( __osf__ )
+# include <machine/endian.h>
+#elif defined( __linux__ ) || defined( __GNUC__ ) || defined( __GNU_LIBRARY__ )
+# if !defined( __MINGW32__ ) && !defined( _AIX )
+# include <endian.h>
+# if !defined( __BEOS__ )
+# include <byteswap.h>
+# endif
+# endif
+#endif
+#endif
+
+/* Now attempt to set the define for platform byte order using any */
+/* of the four forms SYMBOL, _SYMBOL, __SYMBOL & __SYMBOL__, which */
+/* seem to encompass most endian symbol definitions */
+
+#if defined( BIG_ENDIAN ) && defined( LITTLE_ENDIAN )
+# if defined( BYTE_ORDER ) && BYTE_ORDER == BIG_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# elif defined( BYTE_ORDER ) && BYTE_ORDER == LITTLE_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif defined( BIG_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#elif defined( LITTLE_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#endif
+
+#if defined( _BIG_ENDIAN ) && defined( _LITTLE_ENDIAN )
+# if defined( _BYTE_ORDER ) && _BYTE_ORDER == _BIG_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# elif defined( _BYTE_ORDER ) && _BYTE_ORDER == _LITTLE_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif defined( _BIG_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#elif defined( _LITTLE_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#endif
+
+#if defined( __BIG_ENDIAN ) && defined( __LITTLE_ENDIAN )
+# if defined( __BYTE_ORDER ) && __BYTE_ORDER == __BIG_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# elif defined( __BYTE_ORDER ) && __BYTE_ORDER == __LITTLE_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif defined( __BIG_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#elif defined( __LITTLE_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#endif
+
+#if defined( __BIG_ENDIAN__ ) && defined( __LITTLE_ENDIAN__ )
+# if defined( __BYTE_ORDER__ ) && __BYTE_ORDER__ == __BIG_ENDIAN__
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# elif defined( __BYTE_ORDER__ ) && __BYTE_ORDER__ == __LITTLE_ENDIAN__
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif defined( __BIG_ENDIAN__ )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#elif defined( __LITTLE_ENDIAN__ )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#endif
+
+/* if the platform byte order could not be determined, then try to */
+/* set this define using common machine defines */
+#if !defined(PLATFORM_BYTE_ORDER)
+
+#if defined( __alpha__ ) || defined( __alpha ) || defined( i386 ) || \
+ defined( __i386__ ) || defined( _M_I86 ) || defined( _M_IX86 ) || \
+ defined( __OS2__ ) || defined( sun386 ) || defined( __TURBOC__ ) || \
+ defined( vax ) || defined( vms ) || defined( VMS ) || \
+ defined( __VMS ) || defined( _M_X64 )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+
+#elif defined( AMIGA ) || defined( applec ) || defined( __AS400__ ) || \
+ defined( _CRAY ) || defined( __hppa ) || defined( __hp9000 ) || \
+ defined( ibm370 ) || defined( mc68000 ) || defined( m68k ) || \
+ defined( __MRC__ ) || defined( __MVS__ ) || defined( __MWERKS__ ) || \
+ defined( sparc ) || defined( __sparc) || defined( SYMANTEC_C ) || \
+ defined( __VOS__ ) || defined( __TIGCC__ ) || defined( __TANDEM ) || \
+ defined( THINK_C ) || defined( __VMCMS__ ) || defined( _AIX )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+
+#elif defined(__arm__)
+# ifdef __BIG_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# else
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif 1 /* **** EDIT HERE IF NECESSARY **** */
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#elif 0 /* **** EDIT HERE IF NECESSARY **** */
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#else
+# error Please edit lines 132 or 134 in brg_endian.h to set the platform byte order
+#endif
+
+#endif
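+
+/* Editorial note: an illustrative consumer of this header (not part of the upstream
+ * file). Code that needs a compile-time byte order decision typically branches on
+ * PLATFORM_BYTE_ORDER like this:
+ *
+ *   #include "KeccakP-brg_endian_avx2.h"
+ *
+ *   #if (PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN)
+ *   #define toLane64(p) load64_native(p)              // lane layout matches memory
+ *   #else
+ *   #define toLane64(p) byteswap64(load64_native(p))  // convert to little-endian lanes
+ *   #endif
+ *
+ * load64_native and byteswap64 are hypothetical helpers used only for this sketch.
+ */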
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_align_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_align_avx2.h
new file mode 100644
index 0000000000..79e6d9ec0c
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_align_avx2.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <stdint.h>
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+#define ALIGNED_UINT8(N) \
+ union { \
+ uint8_t coeffs[N]; \
+ __m256i vec[(N+31)/32]; \
+ }
+
+#define ALIGNED_INT16(N) \
+ union { \
+ int16_t coeffs[N]; \
+ __m256i vec[(N+15)/16]; \
+ }
+#endif
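+
+/* Editorial note: a small usage sketch (illustration only). The union gives two views
+ * of the same storage: scalar code indexes .coeffs, AVX2 code loads .vec:
+ *
+ *   typedef ALIGNED_INT16(256) poly_like_t;     // hypothetical type for the sketch
+ *
+ *   poly_like_t p;
+ *   p.coeffs[0] = 1;                            // scalar access
+ *   __m256i v = _mm256_load_si256(&p.vec[0]);   // aligned vector access, 16 coefficients
+ *
+ * Because __m256i forces 32-byte alignment of the union, the aligned load is safe.
+ */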
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_basemul_avx2.S b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_basemul_avx2.S
new file mode 100644
index 0000000000..ed2a65be20
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_basemul_avx2.S
@@ -0,0 +1,105 @@
+#include "kyber512r3_consts_avx2.h"
+
+.macro schoolbook off
+vmovdqa _16XQINV*2(%rcx),%ymm0
+vmovdqa (64*\off+ 0)*2(%rsi),%ymm1 # a0
+vmovdqa (64*\off+16)*2(%rsi),%ymm2 # b0
+vmovdqa (64*\off+32)*2(%rsi),%ymm3 # a1
+vmovdqa (64*\off+48)*2(%rsi),%ymm4 # b1
+
+vpmullw %ymm0,%ymm1,%ymm9 # a0.lo
+vpmullw %ymm0,%ymm2,%ymm10 # b0.lo
+vpmullw %ymm0,%ymm3,%ymm11 # a1.lo
+vpmullw %ymm0,%ymm4,%ymm12 # b1.lo
+
+vmovdqa (64*\off+ 0)*2(%rdx),%ymm5 # c0
+vmovdqa (64*\off+16)*2(%rdx),%ymm6 # d0
+
+vpmulhw %ymm5,%ymm1,%ymm13 # a0c0.hi
+vpmulhw %ymm6,%ymm1,%ymm1 # a0d0.hi
+vpmulhw %ymm5,%ymm2,%ymm14 # b0c0.hi
+vpmulhw %ymm6,%ymm2,%ymm2 # b0d0.hi
+
+vmovdqa (64*\off+32)*2(%rdx),%ymm7 # c1
+vmovdqa (64*\off+48)*2(%rdx),%ymm8 # d1
+
+vpmulhw %ymm7,%ymm3,%ymm15 # a1c1.hi
+vpmulhw %ymm8,%ymm3,%ymm3 # a1d1.hi
+vpmulhw %ymm7,%ymm4,%ymm0 # b1c1.hi
+vpmulhw %ymm8,%ymm4,%ymm4 # b1d1.hi
+
+vmovdqa %ymm13,(%rsp)
+
+vpmullw %ymm5,%ymm9,%ymm13 # a0c0.lo
+vpmullw %ymm6,%ymm9,%ymm9 # a0d0.lo
+vpmullw %ymm5,%ymm10,%ymm5 # b0c0.lo
+vpmullw %ymm6,%ymm10,%ymm10 # b0d0.lo
+
+vpmullw %ymm7,%ymm11,%ymm6 # a1c1.lo
+vpmullw %ymm8,%ymm11,%ymm11 # a1d1.lo
+vpmullw %ymm7,%ymm12,%ymm7 # b1c1.lo
+vpmullw %ymm8,%ymm12,%ymm12 # b1d1.lo
+
+vmovdqa _16XQ*2(%rcx),%ymm8
+vpmulhw %ymm8,%ymm13,%ymm13
+vpmulhw %ymm8,%ymm9,%ymm9
+vpmulhw %ymm8,%ymm5,%ymm5
+vpmulhw %ymm8,%ymm10,%ymm10
+vpmulhw %ymm8,%ymm6,%ymm6
+vpmulhw %ymm8,%ymm11,%ymm11
+vpmulhw %ymm8,%ymm7,%ymm7
+vpmulhw %ymm8,%ymm12,%ymm12
+
+vpsubw (%rsp),%ymm13,%ymm13 # -a0c0
+vpsubw %ymm9,%ymm1,%ymm9 # a0d0
+vpsubw %ymm5,%ymm14,%ymm5 # b0c0
+vpsubw %ymm10,%ymm2,%ymm10 # b0d0
+
+vpsubw %ymm6,%ymm15,%ymm6 # a1c1
+vpsubw %ymm11,%ymm3,%ymm11 # a1d1
+vpsubw %ymm7,%ymm0,%ymm7 # b1c1
+vpsubw %ymm12,%ymm4,%ymm12 # b1d1
+
+vmovdqa (%r9),%ymm0
+vmovdqa 32(%r9),%ymm1
+vpmullw %ymm0,%ymm10,%ymm2
+vpmullw %ymm0,%ymm12,%ymm3
+vpmulhw %ymm1,%ymm10,%ymm10
+vpmulhw %ymm1,%ymm12,%ymm12
+vpmulhw %ymm8,%ymm2,%ymm2
+vpmulhw %ymm8,%ymm3,%ymm3
+vpsubw %ymm2,%ymm10,%ymm10 # rb0d0
+vpsubw %ymm3,%ymm12,%ymm12 # rb1d1
+
+vpaddw %ymm5,%ymm9,%ymm9
+vpaddw %ymm7,%ymm11,%ymm11
+vpsubw %ymm13,%ymm10,%ymm13
+vpsubw %ymm12,%ymm6,%ymm6
+
+vmovdqa %ymm13,(64*\off+ 0)*2(%rdi)
+vmovdqa %ymm9,(64*\off+16)*2(%rdi)
+vmovdqa %ymm6,(64*\off+32)*2(%rdi)
+vmovdqa %ymm11,(64*\off+48)*2(%rdi)
+.endm
+
+.text
+.global cdecl(basemul_avx2_asm)
+cdecl(basemul_avx2_asm):
+mov %rsp,%r8
+and $-32,%rsp
+sub $32,%rsp
+
+lea (_ZETAS_EXP+176)*2(%rcx),%r9
+schoolbook 0
+
+add $32*2,%r9
+schoolbook 1
+
+add $192*2,%r9
+schoolbook 2
+
+add $32*2,%r9
+schoolbook 3
+
+mov %r8,%rsp
+ret
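+
+/* Editorial note (illustration only, not part of the upstream file): the schoolbook
+ * macro above is the vectorized form of Kyber's base multiplication in
+ * Z_q[X]/(X^2 - zeta), which in the scalar reference code reads roughly:
+ *
+ *   // r, a, b are coefficient pairs; fqmul is Montgomery multiplication mod q
+ *   static void basemul(int16_t r[2], const int16_t a[2], const int16_t b[2], int16_t zeta)
+ *   {
+ *       r[0]  = fqmul(a[1], b[1]);
+ *       r[0]  = fqmul(r[0], zeta);
+ *       r[0] += fqmul(a[0], b[0]);
+ *       r[1]  = fqmul(a[0], b[1]);
+ *       r[1] += fqmul(a[1], b[0]);
+ *   }
+ *
+ * Each schoolbook invocation applies this to a block of 64 coefficients (32 such
+ * pairs) at once: the vpmullw/vpmulhw/vpsubw sequences implement fqmul's Montgomery
+ * reduction, and the tables at (%r9) supply the per-pair zeta constants.
+ */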
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.c
new file mode 100644
index 0000000000..ef0bb87946
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.c
@@ -0,0 +1,104 @@
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_cbd.h"
+
+/*************************************************
+* Name: load32_littleendian
+*
+* Description: load 4 bytes into a 32-bit integer
+* in little-endian order
+*
+* Arguments: - const uint8_t *x: pointer to input byte array
+*
+* Returns 32-bit unsigned integer loaded from x
+**************************************************/
+static uint32_t load32_littleendian(const uint8_t x[4]) {
+ uint32_t r;
+ r = (uint32_t)x[0];
+ r |= (uint32_t)x[1] << 8;
+ r |= (uint32_t)x[2] << 16;
+ r |= (uint32_t)x[3] << 24;
+ return r;
+}
+
+/*************************************************
+* Name: load24_littleendian
+*
+* Description: load 3 bytes into a 32-bit integer
+* in little-endian order
+* This function is only needed for Kyber-512
+*
+* Arguments: - const uint8_t *x: pointer to input byte array
+*
+* Returns 32-bit unsigned integer loaded from x (most significant byte is zero)
+**************************************************/
+static uint32_t load24_littleendian(const uint8_t x[3]) {
+ uint32_t r;
+ r = (uint32_t)x[0];
+ r |= (uint32_t)x[1] << 8;
+ r |= (uint32_t)x[2] << 16;
+ return r;
+}
+
+
+/*************************************************
+* Name: cbd2
+*
+* Description: Given an array of uniformly random bytes, compute
+* polynomial with coefficients distributed according to
+* a centered binomial distribution with parameter eta=2
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *buf: pointer to input byte array
+**************************************************/
+static void cbd2(poly *r, const uint8_t buf[2 * S2N_KYBER_512_R3_N / 4]) {
+ unsigned int i, j;
+
+ for (i = 0; i < S2N_KYBER_512_R3_N / 8; i++) {
+ uint32_t t = load32_littleendian(buf + 4 * i);
+ uint32_t d = t & 0x55555555;
+ d += (t >> 1) & 0x55555555;
+
+ for (j = 0; j < 8; j++) {
+ int16_t a = (d >> (4 * j + 0)) & 0x3;
+ int16_t b = (d >> (4 * j + 2)) & 0x3;
+ r->coeffs[8 * i + j] = a - b;
+ }
+ }
+}
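+
+/* Editorial note, worked example (illustration only): for the input byte 0xB4
+ * (binary 10110100) the pairwise add above gives d = 01100100, so
+ *   coefficient 0: a = bits 1..0 of d = 0, b = bits 3..2 of d = 1  ->  0 - 1 = -1
+ *   coefficient 1: a = bits 5..4 of d = 2, b = bits 7..6 of d = 1  ->  2 - 1 =  1
+ * i.e. each output coefficient is (#ones in two bits) - (#ones in the next two),
+ * which is exactly the centered binomial distribution with parameter eta = 2. */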
+
+/*************************************************
+* Name: cbd3
+*
+* Description: Given an array of uniformly random bytes, compute
+* polynomial with coefficients distributed according to
+* a centered binomial distribution with parameter eta=3
+* This function is only needed for Kyber-512
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *buf: pointer to input byte array
+**************************************************/
+static void cbd3(poly *r, const uint8_t buf[3 * S2N_KYBER_512_R3_N / 4]) {
+ unsigned int i, j;
+
+ for (i = 0; i < S2N_KYBER_512_R3_N / 4; i++) {
+ uint32_t t = load24_littleendian(buf + 3 * i);
+ uint32_t d = t & 0x00249249;
+ d += (t >> 1) & 0x00249249;
+ d += (t >> 2) & 0x00249249;
+
+ for (j = 0; j < 4; j++) {
+ int16_t a = (d >> (6 * j + 0)) & 0x7;
+ int16_t b = (d >> (6 * j + 3)) & 0x7;
+ r->coeffs[4 * i + j] = a - b;
+ }
+ }
+}
+
+void cbd_eta1(poly *r, const uint8_t buf[S2N_KYBER_512_R3_ETA1 * S2N_KYBER_512_R3_N / 4]) {
+ cbd3(r, buf);
+}
+
+void cbd_eta2(poly *r, const uint8_t buf[S2N_KYBER_512_R3_ETA2 * S2N_KYBER_512_R3_N / 4]) {
+ cbd2(r, buf);
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.h
new file mode 100644
index 0000000000..631821956c
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_poly.h"
+
+#define cbd_eta1 S2N_KYBER_512_R3_NAMESPACE(cbd_eta1)
+void cbd_eta1(poly *r, const uint8_t buf[S2N_KYBER_512_R3_ETA1 * S2N_KYBER_512_R3_N / 4]);
+
+#define cbd_eta2 S2N_KYBER_512_R3_NAMESPACE(cbd_eta2)
+void cbd_eta2(poly *r, const uint8_t buf[S2N_KYBER_512_R3_ETA2 * S2N_KYBER_512_R3_N / 4]);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.c
new file mode 100644
index 0000000000..a922bd220f
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.c
@@ -0,0 +1,137 @@
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_cbd_avx2.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+/*************************************************
+* Name: cbd2
+*
+* Description: Given an array of uniformly random bytes, compute
+* polynomial with coefficients distributed according to
+* a centered binomial distribution with parameter eta=2
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const __m256i *buf: pointer to aligned input byte array
+**************************************************/
+static void cbd2(poly * restrict r, const __m256i buf[2*S2N_KYBER_512_R3_N/128])
+{
+ unsigned int i;
+ __m256i f0, f1, f2, f3;
+ const __m256i mask55 = _mm256_set1_epi32(0x55555555);
+ const __m256i mask33 = _mm256_set1_epi32(0x33333333);
+ const __m256i mask03 = _mm256_set1_epi32(0x03030303);
+ const __m256i mask0F = _mm256_set1_epi32(0x0F0F0F0F);
+
+ for(i = 0; i < S2N_KYBER_512_R3_N/64; i++) {
+ f0 = _mm256_load_si256(&buf[i]);
+
+ f1 = _mm256_srli_epi16(f0, 1);
+ f0 = _mm256_and_si256(mask55, f0);
+ f1 = _mm256_and_si256(mask55, f1);
+ f0 = _mm256_add_epi8(f0, f1);
+
+ f1 = _mm256_srli_epi16(f0, 2);
+ f0 = _mm256_and_si256(mask33, f0);
+ f1 = _mm256_and_si256(mask33, f1);
+ f0 = _mm256_add_epi8(f0, mask33);
+ f0 = _mm256_sub_epi8(f0, f1);
+
+ f1 = _mm256_srli_epi16(f0, 4);
+ f0 = _mm256_and_si256(mask0F, f0);
+ f1 = _mm256_and_si256(mask0F, f1);
+ f0 = _mm256_sub_epi8(f0, mask03);
+ f1 = _mm256_sub_epi8(f1, mask03);
+
+ f2 = _mm256_unpacklo_epi8(f0, f1);
+ f3 = _mm256_unpackhi_epi8(f0, f1);
+
+ f0 = _mm256_cvtepi8_epi16(_mm256_castsi256_si128(f2));
+ f1 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(f2,1));
+ f2 = _mm256_cvtepi8_epi16(_mm256_castsi256_si128(f3));
+ f3 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(f3,1));
+
+ _mm256_store_si256(&r->vec[4*i+0], f0);
+ _mm256_store_si256(&r->vec[4*i+1], f2);
+ _mm256_store_si256(&r->vec[4*i+2], f1);
+ _mm256_store_si256(&r->vec[4*i+3], f3);
+ }
+}
+
+/*************************************************
+* Name: cbd3
+*
+* Description: Given an array of uniformly random bytes, compute
+* polynomial with coefficients distributed according to
+* a centered binomial distribution with parameter eta=3
+* This function is only needed for Kyber-512
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const __m256i *buf: pointer to aligned input byte array
+**************************************************/
+static void cbd3(poly * restrict r, const uint8_t buf[3*S2N_KYBER_512_R3_N/4+8])
+{
+ unsigned int i;
+ __m256i f0, f1, f2, f3;
+ const __m256i mask249 = _mm256_set1_epi32(0x249249);
+ const __m256i mask6DB = _mm256_set1_epi32(0x6DB6DB);
+ const __m256i mask07 = _mm256_set1_epi32(7);
+ const __m256i mask70 = _mm256_set1_epi32(7 << 16);
+ const __m256i mask3 = _mm256_set1_epi16(3);
+ const __m256i shufbidx = _mm256_set_epi8(-1,15,14,13,-1,12,11,10,-1, 9, 8, 7,-1, 6, 5, 4,
+ -1,11,10, 9,-1, 8, 7, 6,-1, 5, 4, 3,-1, 2, 1, 0);
+
+ for(i = 0; i < S2N_KYBER_512_R3_N/32; i++) {
+ // correcting cast-align and cast-qual errors
+ // old version: f0 = _mm256_loadu_si256((__m256i *)&buf[24*i]);
+ f0 = _mm256_loadu_si256((const void *)&buf[24*i]);
+ f0 = _mm256_permute4x64_epi64(f0,0x94);
+ f0 = _mm256_shuffle_epi8(f0,shufbidx);
+
+ f1 = _mm256_srli_epi32(f0,1);
+ f2 = _mm256_srli_epi32(f0,2);
+ f0 = _mm256_and_si256(mask249,f0);
+ f1 = _mm256_and_si256(mask249,f1);
+ f2 = _mm256_and_si256(mask249,f2);
+ f0 = _mm256_add_epi32(f0,f1);
+ f0 = _mm256_add_epi32(f0,f2);
+
+ f1 = _mm256_srli_epi32(f0,3);
+ f0 = _mm256_add_epi32(f0,mask6DB);
+ f0 = _mm256_sub_epi32(f0,f1);
+
+ f1 = _mm256_slli_epi32(f0,10);
+ f2 = _mm256_srli_epi32(f0,12);
+ f3 = _mm256_srli_epi32(f0, 2);
+ f0 = _mm256_and_si256(f0,mask07);
+ f1 = _mm256_and_si256(f1,mask70);
+ f2 = _mm256_and_si256(f2,mask07);
+ f3 = _mm256_and_si256(f3,mask70);
+ f0 = _mm256_add_epi16(f0,f1);
+ f1 = _mm256_add_epi16(f2,f3);
+ f0 = _mm256_sub_epi16(f0,mask3);
+ f1 = _mm256_sub_epi16(f1,mask3);
+
+ f2 = _mm256_unpacklo_epi32(f0,f1);
+ f3 = _mm256_unpackhi_epi32(f0,f1);
+
+ f0 = _mm256_permute2x128_si256(f2,f3,0x20);
+ f1 = _mm256_permute2x128_si256(f2,f3,0x31);
+
+ _mm256_store_si256(&r->vec[2*i+0], f0);
+ _mm256_store_si256(&r->vec[2*i+1], f1);
+ }
+}
+
+/* buf 32 bytes longer for cbd3 */
+void poly_cbd_eta1_avx2(poly *r, const __m256i buf[S2N_KYBER_512_R3_ETA1*S2N_KYBER_512_R3_N/128+1])
+{
+ // correcting cast-align and cast-qual errors
+ // old version: cbd3(r, (uint8_t *)buf);
+ cbd3(r, (const void *)buf);
+}
+
+void poly_cbd_eta2_avx2(poly *r, const __m256i buf[S2N_KYBER_512_R3_ETA2*S2N_KYBER_512_R3_N/128])
+{
+ cbd2(r, buf);
+}
+#endif
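+
+/* Editorial note (illustration only): the AVX2 cbd2 above is a SWAR version of the
+ * scalar routine in kyber512r3_cbd.c; the mask55/mask33 adds compute the same
+ * (#ones in 2 bits) - (#ones in the next 2 bits) values for every input byte in
+ * parallel, and the cvtepi8_epi16 steps widen them to int16_t coefficients. cbd3
+ * reads its input with 32-byte unaligned loads starting every 24 bytes, so the last
+ * load runs past the 192 data bytes; the "+8" in its parameter and the extra __m256i
+ * in poly_cbd_eta1_avx2's buffer provide that slack. */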
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.h
new file mode 100644
index 0000000000..972c71fbf5
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_cbd_avx2.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_poly_avx2.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+#define poly_cbd_eta1_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_cbd_eta1_avx2)
+void poly_cbd_eta1_avx2(poly *r, const __m256i buf[S2N_KYBER_512_R3_ETA1*S2N_KYBER_512_R3_N/128+1]);
+
+#define poly_cbd_eta2_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_cbd_eta2_avx2)
+void poly_cbd_eta2_avx2(poly *r, const __m256i buf[S2N_KYBER_512_R3_ETA2*S2N_KYBER_512_R3_N/128]);
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.c
new file mode 100644
index 0000000000..cdc0b817df
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.c
@@ -0,0 +1,122 @@
+#include "kyber512r3_align_avx2.h"
+#include "kyber512r3_consts_avx2.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#define Q S2N_KYBER_512_R3_Q
+#define MONT -1044 // 2^16 mod q
+#define QINV -3327 // q^-1 mod 2^16
+#define V 20159 // floor(2^26/q + 0.5)
+#define FHI 1441 // mont^2/128
+#define FLO -10079 // qinv*FHI
+#define MONTSQHI 1353 // mont^2
+#define MONTSQLO 20553 // qinv*MONTSQHI
+#define MASK 4095
+#define SHIFT 32
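+
+/* Editorial note: the constants above can be checked directly against q = 3329, e.g.
+ *   2^16 mod q = 2285 = -1044 (mod q)            -> MONT
+ *   3329 * (-3327) = 1 (mod 2^16)                -> QINV
+ *   floor(2^26 / q + 0.5) = 20159                -> V
+ *   MONT^2 mod q = 1353                          -> MONTSQHI
+ *   QINV * 1353 = 20553 (mod 2^16)               -> MONTSQLO
+ */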
+
+const qdata_t qdata = {{
+#define _16XQ 0
+ Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q,
+
+#define _16XQINV 16
+ QINV, QINV, QINV, QINV, QINV, QINV, QINV, QINV,
+ QINV, QINV, QINV, QINV, QINV, QINV, QINV, QINV,
+
+#define _16XV 32
+ V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V,
+
+#define _16XFLO 48
+ FLO, FLO, FLO, FLO, FLO, FLO, FLO, FLO,
+ FLO, FLO, FLO, FLO, FLO, FLO, FLO, FLO,
+
+#define _16XFHI 64
+ FHI, FHI, FHI, FHI, FHI, FHI, FHI, FHI,
+ FHI, FHI, FHI, FHI, FHI, FHI, FHI, FHI,
+
+#define _16XMONTSQLO 80
+ MONTSQLO, MONTSQLO, MONTSQLO, MONTSQLO,
+ MONTSQLO, MONTSQLO, MONTSQLO, MONTSQLO,
+ MONTSQLO, MONTSQLO, MONTSQLO, MONTSQLO,
+ MONTSQLO, MONTSQLO, MONTSQLO, MONTSQLO,
+
+#define _16XMONTSQHI 96
+ MONTSQHI, MONTSQHI, MONTSQHI, MONTSQHI,
+ MONTSQHI, MONTSQHI, MONTSQHI, MONTSQHI,
+ MONTSQHI, MONTSQHI, MONTSQHI, MONTSQHI,
+ MONTSQHI, MONTSQHI, MONTSQHI, MONTSQHI,
+
+#define _16XMASK 112
+ MASK, MASK, MASK, MASK, MASK, MASK, MASK, MASK,
+ MASK, MASK, MASK, MASK, MASK, MASK, MASK, MASK,
+
+#define _REVIDXB 128
+ 3854, 3340, 2826, 2312, 1798, 1284, 770, 256,
+ 3854, 3340, 2826, 2312, 1798, 1284, 770, 256,
+
+#define _REVIDXD 144
+ 7, 0, 6, 0, 5, 0, 4, 0, 3, 0, 2, 0, 1, 0, 0, 0,
+
+#define _ZETAS_EXP 160
+ 31498, 31498, 31498, 31498, -758, -758, -758, -758,
+ 5237, 5237, 5237, 5237, 1397, 1397, 1397, 1397,
+ 14745, 14745, 14745, 14745, 14745, 14745, 14745, 14745,
+ 14745, 14745, 14745, 14745, 14745, 14745, 14745, 14745,
+ -359, -359, -359, -359, -359, -359, -359, -359,
+ -359, -359, -359, -359, -359, -359, -359, -359,
+ 13525, 13525, 13525, 13525, 13525, 13525, 13525, 13525,
+ -12402, -12402, -12402, -12402, -12402, -12402, -12402, -12402,
+ 1493, 1493, 1493, 1493, 1493, 1493, 1493, 1493,
+ 1422, 1422, 1422, 1422, 1422, 1422, 1422, 1422,
+ -20907, -20907, -20907, -20907, 27758, 27758, 27758, 27758,
+ -3799, -3799, -3799, -3799, -15690, -15690, -15690, -15690,
+ -171, -171, -171, -171, 622, 622, 622, 622,
+ 1577, 1577, 1577, 1577, 182, 182, 182, 182,
+ -5827, -5827, 17363, 17363, -26360, -26360, -29057, -29057,
+ 5571, 5571, -1102, -1102, 21438, 21438, -26242, -26242,
+ 573, 573, -1325, -1325, 264, 264, 383, 383,
+ -829, -829, 1458, 1458, -1602, -1602, -130, -130,
+ -5689, -6516, 1496, 30967, -23565, 20179, 20710, 25080,
+ -12796, 26616, 16064, -12442, 9134, -650, -25986, 27837,
+ 1223, 652, -552, 1015, -1293, 1491, -282, -1544,
+ 516, -8, -320, -666, -1618, -1162, 126, 1469,
+ -335, -11477, -32227, 20494, -27738, 945, -14883, 6182,
+ 32010, 10631, 29175, -28762, -18486, 17560, -14430, -5276,
+ -1103, 555, -1251, 1550, 422, 177, -291, 1574,
+ -246, 1159, -777, -602, -1590, -872, 418, -156,
+ 11182, 13387, -14233, -21655, 13131, -4587, 23092, 5493,
+ -32502, 30317, -18741, 12639, 20100, 18525, 19529, -12619,
+ 430, 843, 871, 105, 587, -235, -460, 1653,
+ 778, -147, 1483, 1119, 644, 349, 329, -75,
+ 787, 787, 787, 787, 787, 787, 787, 787,
+ 787, 787, 787, 787, 787, 787, 787, 787,
+ -1517, -1517, -1517, -1517, -1517, -1517, -1517, -1517,
+ -1517, -1517, -1517, -1517, -1517, -1517, -1517, -1517,
+ 28191, 28191, 28191, 28191, 28191, 28191, 28191, 28191,
+ -16694, -16694, -16694, -16694, -16694, -16694, -16694, -16694,
+ 287, 287, 287, 287, 287, 287, 287, 287,
+ 202, 202, 202, 202, 202, 202, 202, 202,
+ 10690, 10690, 10690, 10690, 1358, 1358, 1358, 1358,
+ -11202, -11202, -11202, -11202, 31164, 31164, 31164, 31164,
+ 962, 962, 962, 962, -1202, -1202, -1202, -1202,
+ -1474, -1474, -1474, -1474, 1468, 1468, 1468, 1468,
+ -28073, -28073, 24313, 24313, -10532, -10532, 8800, 8800,
+ 18426, 18426, 8859, 8859, 26675, 26675, -16163, -16163,
+ -681, -681, 1017, 1017, 732, 732, 608, 608,
+ -1542, -1542, 411, 411, -205, -205, -1571, -1571,
+ 19883, -28250, -15887, -8898, -28309, 9075, -30199, 18249,
+ 13426, 14017, -29156, -12757, 16832, 4311, -24155, -17915,
+ -853, -90, -271, 830, 107, -1421, -247, -951,
+ -398, 961, -1508, -725, 448, -1065, 677, -1275,
+ -31183, 25435, -7382, 24391, -20927, 10946, 24214, 16989,
+ 10335, -7934, -22502, 10906, 31636, 28644, 23998, -17422,
+ 817, 603, 1322, -1465, -1215, 1218, -874, -1187,
+ -1185, -1278, -1510, -870, -108, 996, 958, 1522,
+ 20297, 2146, 15355, -32384, -6280, -14903, -11044, 14469,
+ -21498, -20198, 23210, -17442, -23860, -20257, 7756, 23132,
+ 1097, 610, -1285, 384, -136, -1335, 220, -1659,
+ -1530, 794, -854, 478, -308, 991, -1460, 1628,
+
+#define _16XSHIFT 624
+ SHIFT, SHIFT, SHIFT, SHIFT, SHIFT, SHIFT, SHIFT, SHIFT,
+ SHIFT, SHIFT, SHIFT, SHIFT, SHIFT, SHIFT, SHIFT, SHIFT
+}};
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.h
new file mode 100644
index 0000000000..1983ba44d6
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_consts_avx2.h
@@ -0,0 +1,43 @@
+#pragma once
+
+#include "kyber512r3_params.h"
+
+#define _16XQ 0
+#define _16XQINV 16
+#define _16XV 32
+#define _16XFLO 48
+#define _16XFHI 64
+#define _16XMONTSQLO 80
+#define _16XMONTSQHI 96
+#define _16XMASK 112
+#define _REVIDXB 128
+#define _REVIDXD 144
+#define _ZETAS_EXP 160
+#define _16XSHIFT 624
+
+/* The C ABI on MacOS exports all symbols with a leading
+ * underscore. This means that any symbols we refer to from
+ * C files (functions) can't be found, and all symbols we
+ * refer to from ASM also can't be found.
+ *
+ * This define helps us get around this
+ */
+#ifdef __ASSEMBLER__
+#if defined(__WIN32__) || defined(__APPLE__)
+#define decorate(s) _##s
+#define cdecl2(s) decorate(s)
+#define cdecl(s) cdecl2(S2N_KYBER_512_R3_NAMESPACE(##s))
+#else
+#define cdecl(s) S2N_KYBER_512_R3_NAMESPACE(##s)
+#endif
+#endif
+
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#ifndef __ASSEMBLER__
+#include "kyber512r3_align_avx2.h"
+typedef ALIGNED_INT16(640) qdata_t;
+#define qdata S2N_KYBER_512_R3_NAMESPACE(qdata)
+extern const qdata_t qdata;
+#endif
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.c
index 8289a526b3..c5ce0c91f2 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.c
@@ -7,7 +7,9 @@
#include <stddef.h>
#include <stdint.h>
-#include "fips202.h"
+
+#include "kyber512r3_params.h"
+#include "kyber512r3_fips202.h"
#define NROUNDS 24
#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset))))
@@ -24,7 +26,7 @@
static uint64_t load64(const uint8_t *x) {
uint64_t r = 0;
for (size_t i = 0; i < 8; ++i) {
- r |= (uint64_t) x[i] << 8 * i;
+ r |= (uint64_t)x[i] << 8 * i;
}
return r;
@@ -46,18 +48,19 @@ static void store64(uint8_t *x, uint64_t u) {
/* Keccak round constants */
static const uint64_t KeccakF_RoundConstants[NROUNDS] = {
- 0x0000000000000001ULL, 0x0000000000008082ULL,
- 0x800000000000808aULL, 0x8000000080008000ULL,
- 0x000000000000808bULL, 0x0000000080000001ULL,
- 0x8000000080008081ULL, 0x8000000000008009ULL,
- 0x000000000000008aULL, 0x0000000000000088ULL,
- 0x0000000080008009ULL, 0x000000008000000aULL,
- 0x000000008000808bULL, 0x800000000000008bULL,
- 0x8000000000008089ULL, 0x8000000000008003ULL,
- 0x8000000000008002ULL, 0x8000000000000080ULL,
- 0x000000000000800aULL, 0x800000008000000aULL,
- 0x8000000080008081ULL, 0x8000000000008080ULL,
- 0x0000000080000001ULL, 0x8000000080008008ULL};
+ 0x0000000000000001ULL, 0x0000000000008082ULL,
+ 0x800000000000808aULL, 0x8000000080008000ULL,
+ 0x000000000000808bULL, 0x0000000080000001ULL,
+ 0x8000000080008081ULL, 0x8000000000008009ULL,
+ 0x000000000000008aULL, 0x0000000000000088ULL,
+ 0x0000000080008009ULL, 0x000000008000000aULL,
+ 0x000000008000808bULL, 0x800000000000008bULL,
+ 0x8000000000008089ULL, 0x8000000000008003ULL,
+ 0x8000000000008002ULL, 0x8000000000000080ULL,
+ 0x000000000000800aULL, 0x800000008000000aULL,
+ 0x8000000080008081ULL, 0x8000000000008080ULL,
+ 0x0000000080000001ULL, 0x8000000080008008ULL,
+};
/*************************************************
* Name: KeccakF1600_StatePermute
@@ -74,9 +77,8 @@ static void KeccakF1600_StatePermute(uint64_t *state) {
uint64_t Aka, Ake, Aki, Ako, Aku;
uint64_t Ama, Ame, Ami, Amo, Amu;
uint64_t Asa, Ase, Asi, Aso, Asu;
- uint64_t BCa, BCe, BCi, BCo, BCu;
- // copyFromState(A, state)
+ /* copyFromState(A, state) */
Aba = state[0];
Abe = state[1];
Abi = state[2];
@@ -104,6 +106,7 @@ static void KeccakF1600_StatePermute(uint64_t *state) {
Asu = state[24];
for (round = 0; round < NROUNDS; round += 2) {
+ uint64_t BCa, BCe, BCi, BCo, BCu;
uint64_t Da, De, Di, Do, Du;
uint64_t Eba, Ebe, Ebi, Ebo, Ebu;
uint64_t Ega, Ege, Egi, Ego, Egu;
@@ -111,14 +114,14 @@ static void KeccakF1600_StatePermute(uint64_t *state) {
uint64_t Ema, Eme, Emi, Emo, Emu;
uint64_t Esa, Ese, Esi, Eso, Esu;
- // prepareTheta
+ /* prepareTheta */
BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa;
BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase;
BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi;
BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso;
BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu;
- // thetaRhoPiChiIotaPrepareTheta(round , A, E)
+ /* thetaRhoPiChiIotaPrepareTheta(round , A, E) */
Da = BCu ^ ROL(BCe, 1);
De = BCa ^ ROL(BCi, 1);
Di = BCe ^ ROL(BCo, 1);
@@ -206,14 +209,14 @@ static void KeccakF1600_StatePermute(uint64_t *state) {
Eso = BCo ^ ((~BCu) & BCa);
Esu = BCu ^ ((~BCa) & BCe);
- // prepareTheta
+ /* prepareTheta */
BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa;
BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese;
BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi;
BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso;
BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu;
- // thetaRhoPiChiIotaPrepareTheta(round+1, E, A)
+ /* thetaRhoPiChiIotaPrepareTheta(round+1, E, A) */
Da = BCu ^ ROL(BCe, 1);
De = BCa ^ ROL(BCi, 1);
Di = BCe ^ ROL(BCo, 1);
@@ -302,7 +305,7 @@ static void KeccakF1600_StatePermute(uint64_t *state) {
Asu = BCu ^ ((~BCa) & BCe);
}
- // copyToState(state, A)
+ /* copyToState(state, A) */
state[0] = Aba;
state[1] = Abe;
state[2] = Abi;
@@ -400,6 +403,37 @@ static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, uint64_t *s, uint32
}
/*************************************************
+ * Name: shake128_absorb
+ *
+ * Description: Absorb step of the SHAKE128 XOF.
+ * non-incremental, starts by zeroing the state.
+ *
+ * Arguments:   - shake128ctx *state: pointer to (uninitialized) output Keccak state
+ *              - const uint8_t *input: pointer to input to be absorbed
+ *                into the state
+ * - size_t inlen: length of input in bytes
+ **************************************************/
+void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) {
+ keccak_absorb(state->ctx, S2N_KYBER_512_R3_SHAKE128_RATE, input, inlen, 0x1F);
+}
+
+/*************************************************
+ * Name: shake128_squeezeblocks
+ *
+ * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of
+ * SHAKE128_RATE bytes each. Modifies the state. Can be called
+ * multiple times to keep squeezing, i.e., is incremental.
+ *
+ * Arguments: - uint8_t *output: pointer to output blocks
+ * - size_t nblocks: number of blocks to be squeezed
+ * (written to output)
+ * - shake128ctx *state: pointer to input/output Keccak state
+ **************************************************/
+void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) {
+ keccak_squeezeblocks(output, nblocks, state->ctx, S2N_KYBER_512_R3_SHAKE128_RATE);
+}
+
+/*************************************************
* Name: shake256_absorb
*
* Description: Absorb step of the SHAKE256 XOF.
@@ -410,8 +444,8 @@ static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, uint64_t *s, uint32
* into s
* - size_t inlen: length of input in bytes
**************************************************/
-static void shake256_absorb(shake256_ctx *state, const uint8_t *input, size_t inlen) {
- keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F);
+void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) {
+ keccak_absorb(state->ctx, S2N_KYBER_512_R3_SHAKE256_RATE, input, inlen, 0x1F);
}
/*************************************************
@@ -426,8 +460,8 @@ static void shake256_absorb(shake256_ctx *state, const uint8_t *input, size_t in
* (written to output)
* - shake256ctx *state: pointer to input/output Keccak state
**************************************************/
-static void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256_ctx *state) {
- keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE);
+void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) {
+ keccak_squeezeblocks(output, nblocks, state->ctx, S2N_KYBER_512_R3_SHAKE256_RATE);
}
/*************************************************
@@ -441,15 +475,15 @@ static void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256_ctx
* - size_t inlen: length of input in bytes
**************************************************/
void shake256(uint8_t *output, size_t outlen, const uint8_t *input, size_t inlen) {
- size_t nblocks = outlen / SHAKE256_RATE;
- uint8_t t[SHAKE256_RATE];
- shake256_ctx s;
+ size_t nblocks = outlen / S2N_KYBER_512_R3_SHAKE256_RATE;
+ uint8_t t[S2N_KYBER_512_R3_SHAKE256_RATE];
+ shake256ctx s;
shake256_absorb(&s, input, inlen);
shake256_squeezeblocks(output, nblocks, &s);
- output += nblocks * SHAKE256_RATE;
- outlen -= nblocks * SHAKE256_RATE;
+ output += nblocks * S2N_KYBER_512_R3_SHAKE256_RATE;
+ outlen -= nblocks * S2N_KYBER_512_R3_SHAKE256_RATE;
if (outlen) {
shake256_squeezeblocks(t, 1, &s);
@@ -459,3 +493,50 @@ void shake256(uint8_t *output, size_t outlen, const uint8_t *input, size_t inlen
}
}
+/*************************************************
+ * Name: sha3_256
+ *
+ * Description: SHA3-256 with non-incremental API
+ *
+ * Arguments: - uint8_t *output: pointer to output
+ * - const uint8_t *input: pointer to input
+ * - size_t inlen: length of input in bytes
+ **************************************************/
+void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) {
+ uint64_t s[25];
+ uint8_t t[S2N_KYBER_512_R3_SHA3_256_RATE];
+
+ /* Absorb input */
+ keccak_absorb(s, S2N_KYBER_512_R3_SHA3_256_RATE, input, inlen, 0x06);
+
+ /* Squeeze output */
+ keccak_squeezeblocks(t, 1, s, S2N_KYBER_512_R3_SHA3_256_RATE);
+
+ for (size_t i = 0; i < 32; i++) {
+ output[i] = t[i];
+ }
+}
+
+/*************************************************
+ * Name: sha3_512
+ *
+ * Description: SHA3-512 with non-incremental API
+ *
+ * Arguments: - uint8_t *output: pointer to output
+ * - const uint8_t *input: pointer to input
+ * - size_t inlen: length of input in bytes
+ **************************************************/
+void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) {
+ uint64_t s[25];
+ uint8_t t[S2N_KYBER_512_R3_SHA3_512_RATE];
+
+ /* Absorb input */
+ keccak_absorb(s, S2N_KYBER_512_R3_SHA3_512_RATE, input, inlen, 0x06);
+
+ /* Squeeze output */
+ keccak_squeezeblocks(t, 1, s, S2N_KYBER_512_R3_SHA3_512_RATE);
+
+ for (size_t i = 0; i < 64; i++) {
+ output[i] = t[i];
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.h
new file mode 100644
index 0000000000..1f4f395f72
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202.h
@@ -0,0 +1,68 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include "kyber512r3_params.h"
+
+#define S2N_KYBER_512_R3_SHAKE128_RATE 168
+#define S2N_KYBER_512_R3_SHAKE256_RATE 136
+#define S2N_KYBER_512_R3_SHA3_256_RATE 136
+#define S2N_KYBER_512_R3_SHA3_384_RATE 104
+#define S2N_KYBER_512_R3_SHA3_512_RATE 72
+
+#define S2N_KYBER_512_R3_PQC_SHAKECTX_SIZE 25
+
+/* Context for non-incremental API */
+#define shake128ctx S2N_KYBER_512_R3_NAMESPACE(shake128ctx)
+typedef struct {
+ uint64_t ctx[S2N_KYBER_512_R3_PQC_SHAKECTX_SIZE];
+} shake128ctx;
+
+/* Context for non-incremental API */
+#define shake256ctx S2N_KYBER_512_R3_NAMESPACE(shake256ctx)
+typedef struct {
+ uint64_t ctx[S2N_KYBER_512_R3_PQC_SHAKECTX_SIZE];
+} shake256ctx;
+
+/* Initialize the state and absorb the provided input.
+ *
+ * This function does not support being called multiple times
+ * with the same state.
+ */
+#define shake128_absorb S2N_KYBER_512_R3_NAMESPACE(shake128_absorb)
+void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen);
+/* Squeeze output out of the sponge.
+ *
+ * Supports being called multiple times
+ */
+#define shake128_squeezeblocks S2N_KYBER_512_R3_NAMESPACE(shake128_squeezeblocks)
+void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state);
+
+/* Copy the state. */
+#define shake128_ctx_clone S2N_KYBER_512_R3_NAMESPACE(shake128_ctx_clone)
+void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src);
+
+/* Initialize the state and absorb the provided input.
+ *
+ * This function does not support being called multiple times
+ * with the same state.
+ */
+#define shake256_absorb S2N_KYBER_512_R3_NAMESPACE(shake256_absorb)
+void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen);
+/* Squeeze output out of the sponge.
+ *
+ * Supports being called multiple times
+ */
+#define shake256_squeezeblocks S2N_KYBER_512_R3_NAMESPACE(shake256_squeezeblocks)
+void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state);
+
+/* One-stop SHAKE256 call */
+#define shake256 S2N_KYBER_512_R3_NAMESPACE(shake256)
+void shake256(uint8_t *output, size_t outlen, const uint8_t *input, size_t inlen);
+
+#define sha3_256 S2N_KYBER_512_R3_NAMESPACE(sha3_256)
+void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen);
+
+/* One-stop SHA3-512 call */
+#define sha3_512 S2N_KYBER_512_R3_NAMESPACE(sha3_512)
+void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen);
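+
+/* Editorial note: a minimal usage sketch of the incremental SHAKE128 API declared
+ * above (illustration only; `seed` and `seed_len` are assumed caller inputs):
+ *
+ *   uint8_t out[2 * S2N_KYBER_512_R3_SHAKE128_RATE];
+ *   shake128ctx st;
+ *
+ *   shake128_absorb(&st, seed, seed_len);   // one-shot absorb, pads with 0x1F
+ *   shake128_squeezeblocks(out, 2, &st);    // squeeze two full rate-sized blocks
+ *
+ * For output lengths that are not a multiple of the rate, follow the pattern of the
+ * one-stop shake256() above: squeeze whole blocks, then copy the tail from one extra
+ * block.
+ */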
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.c
new file mode 100644
index 0000000000..5f07fb44a3
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.c
@@ -0,0 +1,210 @@
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include "kyber512r3_fips202.h"
+#include "kyber512r3_fips202x4_avx2.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+#define KeccakF1600_StatePermute4x S2N_KYBER_512_R3_NAMESPACE(KeccakP1600times4_PermuteAll_24rounds)
+extern void KeccakF1600_StatePermute4x(__m256i *s);
+
+/* Implementation adapted from the CRYSTALS-Kyber reference code.
+ * For more details see: https://github.com/XKCP/XKCP */
+
+static void keccakx4_absorb_once(__m256i s[25],
+ unsigned int r,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen,
+ uint8_t p)
+{
+ size_t i;
+ uint64_t pos = 0;
+ __m256i t, idx;
+
+ for(i = 0; i < 25; ++i)
+ s[i] = _mm256_setzero_si256();
+
+ idx = _mm256_set_epi64x((long long)in3, (long long)in2, (long long)in1, (long long)in0);
+ while(inlen >= r) {
+ for(i = 0; i < r/8; ++i) {
+ t = _mm256_i64gather_epi64((long long *)pos, idx, 1);
+ s[i] = _mm256_xor_si256(s[i], t);
+ pos += 8;
+ }
+ inlen -= r;
+
+ KeccakF1600_StatePermute4x(s);
+ }
+
+ for(i = 0; i < inlen/8; ++i) {
+ t = _mm256_i64gather_epi64((long long *)pos, idx, 1);
+ s[i] = _mm256_xor_si256(s[i], t);
+ pos += 8;
+ }
+ inlen -= 8*i;
+
+ if(inlen) {
+ t = _mm256_i64gather_epi64((long long *)pos, idx, 1);
+ idx = _mm256_set1_epi64x((1ULL << (8*inlen)) - 1);
+ t = _mm256_and_si256(t, idx);
+ s[i] = _mm256_xor_si256(s[i], t);
+ }
+
+ t = _mm256_set1_epi64x((uint64_t)p << 8*inlen);
+ s[i] = _mm256_xor_si256(s[i], t);
+ t = _mm256_set1_epi64x(1ULL << 63);
+ s[r/8 - 1] = _mm256_xor_si256(s[r/8 - 1], t);
+}
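+
+/* Descriptive note (inferred from the code above; not upstream text): the byte p is the
+ * SHAKE domain-separation/padding byte (0x1F for SHAKE128/256), XORed in right after the
+ * last absorbed message byte, and the final 1ULL << 63 sets the closing pad bit in the
+ * last lane of the rate portion of each of the four parallel states. */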
+
+static void keccakx4_squeezeblocks(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t nblocks,
+ unsigned int r,
+ __m256i s[25])
+{
+ unsigned int i;
+ __m128d t;
+
+ while(nblocks > 0) {
+ KeccakF1600_StatePermute4x(s);
+ for(i=0; i < r/8; ++i) {
+ t = _mm_castsi128_pd(_mm256_castsi256_si128(s[i]));
+ // correcting cast-align errors
+ // old version: _mm_storel_pd((__attribute__((__may_alias__)) double *)&out0[8*i], t);
+ _mm_storel_pd((__attribute__((__may_alias__)) void *)&out0[8*i], t);
+ // old version: _mm_storeh_pd((__attribute__((__may_alias__)) double *)&out1[8*i], t);
+ _mm_storeh_pd((__attribute__((__may_alias__)) void *)&out1[8*i], t);
+ t = _mm_castsi128_pd(_mm256_extracti128_si256(s[i],1));
+ // old version: _mm_storel_pd((__attribute__((__may_alias__)) double *)&out2[8*i], t);
+ _mm_storel_pd((__attribute__((__may_alias__)) void *)&out2[8*i], t);
+ // old version: _mm_storeh_pd((__attribute__((__may_alias__)) double *)&out3[8*i], t);
+ _mm_storeh_pd((__attribute__((__may_alias__)) void *)&out3[8*i], t);
+ }
+
+ out0 += r;
+ out1 += r;
+ out2 += r;
+ out3 += r;
+ --nblocks;
+ }
+}
+
+void shake128x4_absorb_once(keccakx4_state *state,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen)
+{
+ keccakx4_absorb_once(state->s, S2N_KYBER_512_R3_SHAKE128_RATE, in0, in1, in2, in3, inlen, 0x1F);
+}
+
+void shake128x4_squeezeblocks(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t nblocks,
+ keccakx4_state *state)
+{
+ keccakx4_squeezeblocks(out0, out1, out2, out3, nblocks, S2N_KYBER_512_R3_SHAKE128_RATE, state->s);
+}
+
+void shake256x4_absorb_once(keccakx4_state *state,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen)
+{
+ keccakx4_absorb_once(state->s, S2N_KYBER_512_R3_SHAKE256_RATE, in0, in1, in2, in3, inlen, 0x1F);
+}
+
+void shake256x4_squeezeblocks(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t nblocks,
+ keccakx4_state *state)
+{
+ keccakx4_squeezeblocks(out0, out1, out2, out3, nblocks, S2N_KYBER_512_R3_SHAKE256_RATE, state->s);
+}
+
+void shake128x4(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t outlen,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen)
+{
+ unsigned int i;
+ size_t nblocks = outlen/S2N_KYBER_512_R3_SHAKE128_RATE;
+ uint8_t t[4][S2N_KYBER_512_R3_SHAKE128_RATE];
+ keccakx4_state state;
+
+ shake128x4_absorb_once(&state, in0, in1, in2, in3, inlen);
+ shake128x4_squeezeblocks(out0, out1, out2, out3, nblocks, &state);
+
+ out0 += nblocks*S2N_KYBER_512_R3_SHAKE128_RATE;
+ out1 += nblocks*S2N_KYBER_512_R3_SHAKE128_RATE;
+ out2 += nblocks*S2N_KYBER_512_R3_SHAKE128_RATE;
+ out3 += nblocks*S2N_KYBER_512_R3_SHAKE128_RATE;
+ outlen -= nblocks*S2N_KYBER_512_R3_SHAKE128_RATE;
+
+ if(outlen) {
+ shake128x4_squeezeblocks(t[0], t[1], t[2], t[3], 1, &state);
+ for(i = 0; i < outlen; ++i) {
+ out0[i] = t[0][i];
+ out1[i] = t[1][i];
+ out2[i] = t[2][i];
+ out3[i] = t[3][i];
+ }
+ }
+}
+
+void shake256x4(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t outlen,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen)
+{
+ unsigned int i;
+ size_t nblocks = outlen/S2N_KYBER_512_R3_SHAKE256_RATE;
+ uint8_t t[4][S2N_KYBER_512_R3_SHAKE256_RATE];
+ keccakx4_state state;
+
+ shake256x4_absorb_once(&state, in0, in1, in2, in3, inlen);
+ shake256x4_squeezeblocks(out0, out1, out2, out3, nblocks, &state);
+
+ out0 += nblocks*S2N_KYBER_512_R3_SHAKE256_RATE;
+ out1 += nblocks*S2N_KYBER_512_R3_SHAKE256_RATE;
+ out2 += nblocks*S2N_KYBER_512_R3_SHAKE256_RATE;
+ out3 += nblocks*S2N_KYBER_512_R3_SHAKE256_RATE;
+ outlen -= nblocks*S2N_KYBER_512_R3_SHAKE256_RATE;
+
+ if(outlen) {
+ shake256x4_squeezeblocks(t[0], t[1], t[2], t[3], 1, &state);
+ for(i = 0; i < outlen; ++i) {
+ out0[i] = t[0][i];
+ out1[i] = t[1][i];
+ out2[i] = t[2][i];
+ out3[i] = t[3][i];
+ }
+ }
+}
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.h
new file mode 100644
index 0000000000..8c4896724c
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fips202x4_avx2.h
@@ -0,0 +1,70 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include "kyber512r3_params.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+#define keccakx4_state S2N_KYBER_512_R3_NAMESPACE(keccakx4_state)
+typedef struct {
+ __m256i s[25];
+} keccakx4_state;
+
+#define shake128x4_absorb_once S2N_KYBER_512_R3_NAMESPACE(shake128x4_absorb_once)
+void shake128x4_absorb_once(keccakx4_state *state,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen);
+
+#define shake128x4_squeezeblocks S2N_KYBER_512_R3_NAMESPACE(shake128x4_squeezeblocks)
+void shake128x4_squeezeblocks(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t nblocks,
+ keccakx4_state *state);
+
+#define shake256x4_absorb_once S2N_KYBER_512_R3_NAMESPACE(shake256x4_absorb_once)
+void shake256x4_absorb_once(keccakx4_state *state,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen);
+
+#define shake256x4_squeezeblocks S2N_KYBER_512_R3_NAMESPACE(shake256x4_squeezeblocks)
+void shake256x4_squeezeblocks(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t nblocks,
+ keccakx4_state *state);
+
+#define shake128x4 S2N_KYBER_512_R3_NAMESPACE(shake128x4)
+void shake128x4(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t outlen,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen);
+
+#define shake256x4 S2N_KYBER_512_R3_NAMESPACE(shake256x4)
+void shake256x4(uint8_t *out0,
+ uint8_t *out1,
+ uint8_t *out2,
+ uint8_t *out3,
+ size_t outlen,
+ const uint8_t *in0,
+ const uint8_t *in1,
+ const uint8_t *in2,
+ const uint8_t *in3,
+ size_t inlen);
+#endif
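+
+/* Illustrative usage sketch (not part of the upstream header): the x4 API drives four
+ * independent SHAKE lanes over a single AVX2 state. Given four 34-byte inputs in0..in3,
+ * squeezing one block from each lane would look roughly like:
+ *
+ *   keccakx4_state st;
+ *   uint8_t out[4][S2N_KYBER_512_R3_SHAKE128_RATE];
+ *   shake128x4_absorb_once(&st, in0, in1, in2, in3, 34);
+ *   shake128x4_squeezeblocks(out[0], out[1], out[2], out[3], 1, &st);
+ */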
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fq_avx2.S b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fq_avx2.S
new file mode 100644
index 0000000000..3492489a67
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_fq_avx2.S
@@ -0,0 +1,122 @@
+#include "kyber512r3_consts_avx2.h"
+
+// The small macros (.inc files) are combined with .S files directly
+/*****.include "fq.inc"*****/
+/***************************/
+.macro red16 r,rs=0,x=12
+vpmulhw %ymm1,%ymm\r,%ymm\x
+.if \rs
+vpmulhrsw %ymm\rs,%ymm\x,%ymm\x
+.else
+vpsraw $10,%ymm\x,%ymm\x
+.endif
+vpmullw %ymm0,%ymm\x,%ymm\x
+vpsubw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro csubq r,x=12
+vpsubw %ymm0,%ymm\r,%ymm\r
+vpsraw $15,%ymm\r,%ymm\x
+vpand %ymm0,%ymm\x,%ymm\x
+vpaddw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro caddq r,x=12
+vpsraw $15,%ymm\r,%ymm\x
+vpand %ymm0,%ymm\x,%ymm\x
+vpaddw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro fqmulprecomp al,ah,b,x=12
+vpmullw %ymm\al,%ymm\b,%ymm\x
+vpmulhw %ymm\ah,%ymm\b,%ymm\b
+vpmulhw %ymm0,%ymm\x,%ymm\x
+vpsubw %ymm\x,%ymm\b,%ymm\b
+.endm
+/***************************/
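+
+/* Descriptive note (inferred from the macro bodies above; not upstream text):
+   red16 performs a Barrett-style reduction of each 16-bit lane against q in %ymm0 using
+   the constant in %ymm1; csubq conditionally subtracts q and caddq conditionally adds q
+   to move lanes back into range; fqmulprecomp multiplies each lane by a constant supplied
+   as a precomputed (low, high) pair and finishes with a Montgomery reduction. */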
+
+.text
+reduce128_avx:
+#load
+vmovdqa (%rdi),%ymm2
+vmovdqa 32(%rdi),%ymm3
+vmovdqa 64(%rdi),%ymm4
+vmovdqa 96(%rdi),%ymm5
+vmovdqa 128(%rdi),%ymm6
+vmovdqa 160(%rdi),%ymm7
+vmovdqa 192(%rdi),%ymm8
+vmovdqa 224(%rdi),%ymm9
+
+red16 2
+red16 3
+red16 4
+red16 5
+red16 6
+red16 7
+red16 8
+red16 9
+
+#store
+vmovdqa %ymm2,(%rdi)
+vmovdqa %ymm3,32(%rdi)
+vmovdqa %ymm4,64(%rdi)
+vmovdqa %ymm5,96(%rdi)
+vmovdqa %ymm6,128(%rdi)
+vmovdqa %ymm7,160(%rdi)
+vmovdqa %ymm8,192(%rdi)
+vmovdqa %ymm9,224(%rdi)
+
+ret
+
+.global cdecl(reduce_avx2_asm)
+cdecl(reduce_avx2_asm):
+#consts
+vmovdqa _16XQ*2(%rsi),%ymm0
+vmovdqa _16XV*2(%rsi),%ymm1
+call reduce128_avx
+add $256,%rdi
+call reduce128_avx
+ret
+
+tomont128_avx:
+#load
+vmovdqa (%rdi),%ymm3
+vmovdqa 32(%rdi),%ymm4
+vmovdqa 64(%rdi),%ymm5
+vmovdqa 96(%rdi),%ymm6
+vmovdqa 128(%rdi),%ymm7
+vmovdqa 160(%rdi),%ymm8
+vmovdqa 192(%rdi),%ymm9
+vmovdqa 224(%rdi),%ymm10
+
+fqmulprecomp 1,2,3,11
+fqmulprecomp 1,2,4,12
+fqmulprecomp 1,2,5,13
+fqmulprecomp 1,2,6,14
+fqmulprecomp 1,2,7,15
+fqmulprecomp 1,2,8,11
+fqmulprecomp 1,2,9,12
+fqmulprecomp 1,2,10,13
+
+#store
+vmovdqa %ymm3,(%rdi)
+vmovdqa %ymm4,32(%rdi)
+vmovdqa %ymm5,64(%rdi)
+vmovdqa %ymm6,96(%rdi)
+vmovdqa %ymm7,128(%rdi)
+vmovdqa %ymm8,160(%rdi)
+vmovdqa %ymm9,192(%rdi)
+vmovdqa %ymm10,224(%rdi)
+
+ret
+
+.global cdecl(tomont_avx2_asm)
+cdecl(tomont_avx2_asm):
+#consts
+vmovdqa _16XQ*2(%rsi),%ymm0
+vmovdqa _16XMONTSQLO*2(%rsi),%ymm1
+vmovdqa _16XMONTSQHI*2(%rsi),%ymm2
+call tomont128_avx
+add $256,%rdi
+call tomont128_avx
+ret
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.c
new file mode 100644
index 0000000000..ace1783448
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.c
@@ -0,0 +1,323 @@
+#include <stddef.h>
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_indcpa.h"
+#include "kyber512r3_poly.h"
+#include "kyber512r3_polyvec.h"
+#include "kyber512r3_fips202.h"
+#include "kyber512r3_symmetric.h"
+#include "pq-crypto/s2n_pq_random.h"
+#include "utils/s2n_safety.h"
+
+/*************************************************
+* Name: pack_pk
+*
+* Description: Serialize the public key as concatenation of the
+* serialized vector of polynomials pk
+* and the public seed used to generate the matrix A.
+*
+* Arguments: uint8_t *r: pointer to the output serialized public key
+* polyvec *pk: pointer to the input public-key polyvec
+* const uint8_t *seed: pointer to the input public seed
+**************************************************/
+static void pack_pk(uint8_t r[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES], polyvec *pk, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES]) {
+ polyvec_tobytes(r, pk);
+ for (size_t i = 0; i < S2N_KYBER_512_R3_SYMBYTES; i++) {
+ r[i + S2N_KYBER_512_R3_POLYVECBYTES] = seed[i];
+ }
+}
+
+/*************************************************
+* Name: unpack_pk
+*
+* Description: De-serialize public key from a byte array;
+* approximate inverse of pack_pk
+*
+* Arguments: - polyvec *pk: pointer to output public-key
+* polynomial vector
+* - uint8_t *seed: pointer to output seed to generate
+* matrix A
+* - const uint8_t *packedpk: pointer to input serialized public key
+**************************************************/
+static void unpack_pk(polyvec *pk, uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], const uint8_t packedpk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES]) {
+ polyvec_frombytes(pk, packedpk);
+ for (size_t i = 0; i < S2N_KYBER_512_R3_SYMBYTES; i++) {
+ seed[i] = packedpk[i + S2N_KYBER_512_R3_POLYVECBYTES];
+ }
+}
+
+/*************************************************
+* Name: pack_sk
+*
+* Description: Serialize the secret key
+*
+* Arguments: - uint8_t *r: pointer to output serialized secret key
+* - polyvec *sk: pointer to input vector of polynomials (secret key)
+**************************************************/
+static void pack_sk(uint8_t r[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES], polyvec *sk) {
+ polyvec_tobytes(r, sk);
+}
+
+/*************************************************
+* Name: unpack_sk
+*
+* Description: De-serialize the secret key;
+* inverse of pack_sk
+*
+* Arguments: - polyvec *sk: pointer to output vector of
+* polynomials (secret key)
+* - const uint8_t *packedsk: pointer to input serialized secret key
+**************************************************/
+static void unpack_sk(polyvec *sk, const uint8_t packedsk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES]) {
+ polyvec_frombytes(sk, packedsk);
+}
+
+/*************************************************
+* Name: pack_ciphertext
+*
+* Description: Serialize the ciphertext as concatenation of the
+* compressed and serialized vector of polynomials b
+* and the compressed and serialized polynomial v
+*
+* Arguments: uint8_t *r: pointer to the output serialized ciphertext
+* poly *pk: pointer to the input vector of polynomials b
+* poly *v: pointer to the input polynomial v
+**************************************************/
+static void pack_ciphertext(uint8_t r[S2N_KYBER_512_R3_INDCPA_BYTES], polyvec *b, poly *v) {
+ polyvec_compress(r, b);
+ poly_compress(r + S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES, v);
+}
+
+/*************************************************
+* Name: unpack_ciphertext
+*
+* Description: De-serialize and decompress ciphertext from a byte array;
+* approximate inverse of pack_ciphertext
+*
+* Arguments: - polyvec *b: pointer to the output vector of polynomials b
+* - poly *v: pointer to the output polynomial v
+* - const uint8_t *c: pointer to the input serialized ciphertext
+**************************************************/
+static void unpack_ciphertext(polyvec *b, poly *v, const uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES]) {
+ polyvec_decompress(b, c);
+ poly_decompress(v, c + S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES);
+}
+
+/*************************************************
+* Name: rej_uniform
+*
+* Description: Run rejection sampling on uniform random bytes to generate
+* uniform random integers mod q
+*
+* Arguments: - int16_t *r: pointer to output buffer
+* - unsigned int len: requested number of 16-bit integers
+* (uniform mod q)
+* - const uint8_t *buf: pointer to input buffer
+* (assumed to be uniform random bytes)
+* - unsigned int buflen: length of input buffer in bytes
+*
+* Returns number of sampled 16-bit integers (at most len)
+**************************************************/
+static unsigned int rej_uniform(int16_t *r, unsigned int len, const uint8_t *buf, unsigned int buflen) {
+ unsigned int ctr, pos;
+
+ ctr = pos = 0;
+ while (ctr < len && pos + 3 <= buflen) {
+ uint16_t val0 = ((buf[pos + 0] >> 0) | ((uint16_t)buf[pos + 1] << 8)) & 0xFFF;
+ uint16_t val1 = ((buf[pos + 1] >> 4) | ((uint16_t)buf[pos + 2] << 4)) & 0xFFF;
+ pos += 3;
+
+ if (val0 < S2N_KYBER_512_R3_Q) {
+ r[ctr++] = val0;
+ }
+ if (ctr < len && val1 < S2N_KYBER_512_R3_Q) {
+ r[ctr++] = val1;
+ }
+ }
+
+ return ctr;
+}
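+
+/* Worked example (added for clarity; not upstream text): the three bytes 0x01 0x23 0x45
+ * unpack into the 12-bit candidates val0 = 0x301 (769) and val1 = 0x452 (1106); both are
+ * below q = 3329, so both are accepted and ctr advances by two. */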
+
+/*************************************************
+* Name: gen_matrix
+*
+* Description: Deterministically generate matrix A (or the transpose of A)
+* from a seed. Entries of the matrix are polynomials that look
+* uniformly random. Performs rejection sampling on output of
+* a XOF
+*
+* Arguments: - polyvec *a: pointer to output matrix A
+* - const uint8_t *seed: pointer to input seed
+* - int transposed: boolean deciding whether A or A^T
+* is generated
+**************************************************/
+#define XOF_BLOCKBYTES 168
+#define GEN_MATRIX_NBLOCKS ((12*S2N_KYBER_512_R3_N/8*(1 << 12)/S2N_KYBER_512_R3_Q + XOF_BLOCKBYTES)/XOF_BLOCKBYTES)
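+/* Worked arithmetic (added for clarity; not upstream text): with N = 256, q = 3329 and an
+ * XOF block of 168 bytes this evaluates to ((384*4096)/3329 + 168)/168 = (472 + 168)/168 = 3,
+ * so three full SHAKE128 blocks (504 bytes) are squeezed up front per matrix entry. */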
+static void gen_matrix(polyvec *a, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], int transposed) {
+ unsigned int ctr, buflen, off;
+ uint8_t buf[GEN_MATRIX_NBLOCKS * XOF_BLOCKBYTES + 2];
+ xof_state state;
+
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ for (unsigned int j = 0; j < S2N_KYBER_512_R3_K; j++) {
+ if (transposed) {
+ kyber_shake128_absorb(&state, seed, i, j);
+ } else {
+ kyber_shake128_absorb(&state, seed, j, i);
+ }
+
+ shake128_squeezeblocks(buf, GEN_MATRIX_NBLOCKS, &state);
+ buflen = GEN_MATRIX_NBLOCKS * XOF_BLOCKBYTES;
+ ctr = rej_uniform(a[i].vec[j].coeffs, S2N_KYBER_512_R3_N, buf, buflen);
+
+ while (ctr < S2N_KYBER_512_R3_N) {
+ off = buflen % 3;
+ for (unsigned int k = 0; k < off; k++) {
+ buf[k] = buf[buflen - off + k];
+ }
+ shake128_squeezeblocks(buf + off, 1, &state);
+ buflen = off + XOF_BLOCKBYTES;
+ ctr += rej_uniform(a[i].vec[j].coeffs + ctr, S2N_KYBER_512_R3_N - ctr, buf, buflen);
+ }
+ }
+ }
+}
+
+/*************************************************
+* Name: indcpa_keypair
+*
+* Description: Generates public and private key for the CPA-secure
+* public-key encryption scheme underlying Kyber
+*
+* Arguments: - uint8_t *pk: pointer to output public key
+* (of length S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES bytes)
+* - uint8_t *sk: pointer to output private key
+* (of length S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES bytes)
+*
+* Returns: 0 on success
+* !0 on failure
+**************************************************/
+int indcpa_keypair(uint8_t pk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES], uint8_t sk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES]) {
+ uint8_t buf[2 * S2N_KYBER_512_R3_SYMBYTES];
+ const uint8_t *publicseed = buf;
+ const uint8_t *noiseseed = buf + S2N_KYBER_512_R3_SYMBYTES;
+ uint8_t nonce = 0;
+ polyvec a[S2N_KYBER_512_R3_K], e, pkpv, skpv;
+
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(buf, S2N_KYBER_512_R3_SYMBYTES));
+ sha3_512(buf, buf, S2N_KYBER_512_R3_SYMBYTES);
+
+ gen_matrix(a, publicseed, 0);
+
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_getnoise_eta1(&skpv.vec[i], noiseseed, nonce++);
+ }
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_getnoise_eta1(&e.vec[i], noiseseed, nonce++);
+ }
+
+ polyvec_ntt(&skpv);
+ polyvec_ntt(&e);
+
+ /* matrix-vector multiplication */
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ polyvec_pointwise_acc_montgomery(&pkpv.vec[i], &a[i], &skpv);
+ poly_tomont(&pkpv.vec[i]);
+ }
+
+ polyvec_add(&pkpv, &pkpv, &e);
+ polyvec_reduce(&pkpv);
+
+ pack_sk(sk, &skpv);
+ pack_pk(pk, &pkpv, publicseed);
+
+ return 0;
+}
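+
+/* Background note (added for clarity; not upstream text): the key pair produced above is a
+ * Module-LWE sample kept in the NTT domain: sk stores s-hat = NTT(s), and pk stores
+ * t-hat = A-hat o s-hat + e-hat together with the public seed used to regenerate A. */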
+
+/*************************************************
+* Name: indcpa_enc
+*
+* Description: Encryption function of the CPA-secure
+* public-key encryption scheme underlying Kyber.
+*
+* Arguments: - uint8_t *c: pointer to output ciphertext
+* (of length S2N_KYBER_512_R3_INDCPA_BYTES bytes)
+* - const uint8_t *m: pointer to input message
+* (of length S2N_KYBER_512_R3_INDCPA_MSGBYTES bytes)
+* - const uint8_t *pk: pointer to input public key
+* (of length S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES)
+* - const uint8_t *coins: pointer to input random coins
+* used as seed (of length S2N_KYBER_512_R3_SYMBYTES)
+* to deterministically generate all
+* randomness
+**************************************************/
+void indcpa_enc(uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES], const uint8_t m[S2N_KYBER_512_R3_INDCPA_MSGBYTES],
+ const uint8_t pk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES], const uint8_t coins[S2N_KYBER_512_R3_SYMBYTES]) {
+ uint8_t seed[S2N_KYBER_512_R3_SYMBYTES];
+ uint8_t nonce = 0;
+ polyvec sp, pkpv, ep, at[S2N_KYBER_512_R3_K], bp;
+ poly v, k, epp;
+
+ unpack_pk(&pkpv, seed, pk);
+ poly_frommsg(&k, m);
+ gen_matrix(at, seed, 1);
+
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_getnoise_eta1(sp.vec + i, coins, nonce++);
+ }
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_getnoise_eta2(ep.vec + i, coins, nonce++);
+ }
+ poly_getnoise_eta2(&epp, coins, nonce++);
+
+ polyvec_ntt(&sp);
+
+ /* matrix-vector multiplication */
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ polyvec_pointwise_acc_montgomery(&bp.vec[i], &at[i], &sp);
+ }
+
+ polyvec_pointwise_acc_montgomery(&v, &pkpv, &sp);
+
+ polyvec_invntt_tomont(&bp);
+ poly_invntt_tomont(&v);
+
+ polyvec_add(&bp, &bp, &ep);
+ poly_add(&v, &v, &epp);
+ poly_add(&v, &v, &k);
+ polyvec_reduce(&bp);
+ poly_reduce(&v);
+
+ pack_ciphertext(c, &bp, &v);
+}
+
+/*************************************************
+* Name: indcpa_dec
+*
+* Description: Decryption function of the CPA-secure
+* public-key encryption scheme underlying Kyber.
+*
+* Arguments: - uint8_t *m: pointer to output decrypted message
+* (of length S2N_KYBER_512_R3_INDCPA_MSGBYTES)
+* - const uint8_t *c: pointer to input ciphertext
+* (of length S2N_KYBER_512_R3_INDCPA_BYTES)
+* - const uint8_t *sk: pointer to input secret key
+* (of length S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES)
+**************************************************/
+void indcpa_dec(uint8_t m[S2N_KYBER_512_R3_INDCPA_MSGBYTES], const uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES],
+ const uint8_t sk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES]) {
+ polyvec bp, skpv;
+ poly v, mp;
+
+ unpack_ciphertext(&bp, &v, c);
+ unpack_sk(&skpv, sk);
+
+ polyvec_ntt(&bp);
+ polyvec_pointwise_acc_montgomery(&mp, &skpv, &bp);
+ poly_invntt_tomont(&mp);
+
+ poly_sub(&mp, &v, &mp);
+ poly_reduce(&mp);
+
+ poly_tomsg(m, &mp);
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.h
new file mode 100644
index 0000000000..f8b9e401a0
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+
+#define indcpa_keypair S2N_KYBER_512_R3_NAMESPACE(indcpa_keypair)
+int indcpa_keypair(uint8_t pk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES], uint8_t sk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES]);
+
+#define indcpa_enc S2N_KYBER_512_R3_NAMESPACE(indcpa_enc)
+void indcpa_enc(uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES], const uint8_t m[S2N_KYBER_512_R3_INDCPA_MSGBYTES],
+ const uint8_t pk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES], const uint8_t coins[S2N_KYBER_512_R3_SYMBYTES]);
+
+#define indcpa_dec S2N_KYBER_512_R3_NAMESPACE(indcpa_dec)
+void indcpa_dec(uint8_t m[S2N_KYBER_512_R3_INDCPA_MSGBYTES], const uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES],
+ const uint8_t sk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES]);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.c
new file mode 100644
index 0000000000..91e7513881
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.c
@@ -0,0 +1,363 @@
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include "kyber512r3_align_avx2.h"
+#include "kyber512r3_params.h"
+#include "kyber512r3_indcpa_avx2.h"
+#include "kyber512r3_polyvec_avx2.h"
+#include "kyber512r3_poly_avx2.h"
+#include "kyber512r3_rejsample_avx2.h"
+#include "kyber512r3_fips202.h"
+#include "kyber512r3_fips202x4_avx2.h"
+#include "pq-crypto/s2n_pq_random.h"
+#include "utils/s2n_safety.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+/*************************************************
+* Name: pack_pk
+*
+* Description: Serialize the public key as concatenation of the
+* serialized vector of polynomials pk and the
+* public seed used to generate the matrix A.
+* The polynomial coefficients in pk are assumed to
+* lie in the interval [0,q], i.e. pk must be reduced
+* by polyvec_reduce_avx2().
+*
+* Arguments: uint8_t *r: pointer to the output serialized public key
+* polyvec *pk: pointer to the input public-key polyvec
+* const uint8_t *seed: pointer to the input public seed
+**************************************************/
+static void pack_pk(uint8_t r[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES],
+ polyvec *pk,
+ const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES])
+{
+ polyvec_tobytes_avx2(r, pk);
+ memcpy(r+S2N_KYBER_512_R3_POLYVECBYTES, seed, S2N_KYBER_512_R3_SYMBYTES);
+}
+
+/*************************************************
+* Name: unpack_pk
+*
+* Description: De-serialize public key from a byte array;
+* approximate inverse of pack_pk
+*
+* Arguments: - polyvec *pk: pointer to output public-key polynomial vector
+* - uint8_t *seed: pointer to output seed to generate matrix A
+* - const uint8_t *packedpk: pointer to input serialized public key
+**************************************************/
+static void unpack_pk(polyvec *pk,
+ uint8_t seed[S2N_KYBER_512_R3_SYMBYTES],
+ const uint8_t packedpk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES])
+{
+ polyvec_frombytes_avx2(pk, packedpk);
+ memcpy(seed, packedpk+S2N_KYBER_512_R3_POLYVECBYTES, S2N_KYBER_512_R3_SYMBYTES);
+}
+
+/*************************************************
+* Name: pack_sk
+*
+* Description: Serialize the secret key.
+* The polynomial coefficients in sk are assumed to
+* lie in the interval [0,q], i.e. sk must be reduced
+* by polyvec_reduce_avx2().
+*
+* Arguments: - uint8_t *r: pointer to output serialized secret key
+* - polyvec *sk: pointer to input vector of polynomials (secret key)
+**************************************************/
+static void pack_sk(uint8_t r[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES], polyvec *sk)
+{
+ polyvec_tobytes_avx2(r, sk);
+}
+
+/*************************************************
+* Name: unpack_sk
+*
+* Description: De-serialize the secret key; inverse of pack_sk
+*
+* Arguments: - polyvec *sk: pointer to output vector of polynomials (secret key)
+* - const uint8_t *packedsk: pointer to input serialized secret key
+**************************************************/
+static void unpack_sk(polyvec *sk, const uint8_t packedsk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES])
+{
+ polyvec_frombytes_avx2(sk, packedsk);
+}
+
+/*************************************************
+* Name: pack_ciphertext
+*
+* Description: Serialize the ciphertext as concatenation of the
+* compressed and serialized vector of polynomials b
+* and the compressed and serialized polynomial v.
+* The polynomial coefficients in b and v are assumed to
+* lie in the interval [0,q], i.e. b and v must be reduced
+* by polyvec_reduce_avx2() and poly_reduce_avx2(), respectively.
+*
+* Arguments: uint8_t *r: pointer to the output serialized ciphertext
+* poly *pk: pointer to the input vector of polynomials b
+* poly *v: pointer to the input polynomial v
+**************************************************/
+static void pack_ciphertext(uint8_t r[S2N_KYBER_512_R3_INDCPA_BYTES], polyvec *b, poly *v)
+{
+ polyvec_compress_avx2(r, b);
+ poly_compress_avx2(r+S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES, v);
+}
+
+/*************************************************
+* Name: unpack_ciphertext
+*
+* Description: De-serialize and decompress ciphertext from a byte array;
+* approximate inverse of pack_ciphertext
+*
+* Arguments: - polyvec *b: pointer to the output vector of polynomials b
+* - poly *v: pointer to the output polynomial v
+* - const uint8_t *c: pointer to the input serialized ciphertext
+**************************************************/
+static void unpack_ciphertext(polyvec *b, poly *v, const uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES])
+{
+ polyvec_decompress_avx2(b, c);
+ poly_decompress_avx2(v, c+S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES);
+}
+
+/*************************************************
+* Name: rej_uniform
+*
+* Description: Run rejection sampling on uniform random bytes to generate
+* uniform random integers mod q
+*
+* Arguments: - int16_t *r: pointer to output array
+* - unsigned int len: requested number of 16-bit integers (uniform mod q)
+* - const uint8_t *buf: pointer to input buffer (assumed to be uniformly random bytes)
+* - unsigned int buflen: length of input buffer in bytes
+*
+* Returns number of sampled 16-bit integers (at most len)
+**************************************************/
+static unsigned int rej_uniform(int16_t *r,
+ unsigned int len,
+ const uint8_t *buf,
+ unsigned int buflen)
+{
+ unsigned int ctr, pos;
+ uint16_t val0, val1;
+
+ ctr = pos = 0;
+ while(ctr < len && pos <= buflen - 3) { // buflen is always at least 3
+ val0 = ((buf[pos+0] >> 0) | ((uint16_t)buf[pos+1] << 8)) & 0xFFF;
+ val1 = ((buf[pos+1] >> 4) | ((uint16_t)buf[pos+2] << 4)) & 0xFFF;
+ pos += 3;
+
+ if(val0 < S2N_KYBER_512_R3_Q)
+ r[ctr++] = val0;
+ if(ctr < len && val1 < S2N_KYBER_512_R3_Q)
+ r[ctr++] = val1;
+ }
+
+ return ctr;
+}
+
+#define gen_a(A,B) gen_matrix_avx2(A,B,0)
+#define gen_at(A,B) gen_matrix_avx2(A,B,1)
+
+/*************************************************
+* Name: gen_matrix_avx2
+*
+* Description: Deterministically generate matrix A (or the transpose of A)
+* from a seed. Entries of the matrix are polynomials that look
+* uniformly random. Performs rejection sampling on output of
+* a XOF
+*
+* Arguments: - polyvec *a: pointer to output matrix A
+* - const uint8_t *seed: pointer to input seed
+* - int transposed: boolean deciding whether A or A^T is generated
+**************************************************/
+void gen_matrix_avx2(polyvec *a, const uint8_t seed[32], int transposed)
+{
+ unsigned int ctr0, ctr1, ctr2, ctr3;
+ ALIGNED_UINT8(S2N_KYBER_512_R3_REJ_UNIFORM_AVX_NBLOCKS*S2N_KYBER_512_R3_SHAKE128_RATE) buf[4];
+ __m256i f;
+ keccakx4_state state;
+
+ // correcting cast-align and cast-qual errors
+ // old version: f = _mm256_loadu_si256((__m256i *)seed);
+ f = _mm256_loadu_si256((const void *)seed);
+ _mm256_store_si256(buf[0].vec, f);
+ _mm256_store_si256(buf[1].vec, f);
+ _mm256_store_si256(buf[2].vec, f);
+ _mm256_store_si256(buf[3].vec, f);
+
+ if(transposed) {
+ buf[0].coeffs[32] = 0;
+ buf[0].coeffs[33] = 0;
+ buf[1].coeffs[32] = 0;
+ buf[1].coeffs[33] = 1;
+ buf[2].coeffs[32] = 1;
+ buf[2].coeffs[33] = 0;
+ buf[3].coeffs[32] = 1;
+ buf[3].coeffs[33] = 1;
+ }
+ else {
+ buf[0].coeffs[32] = 0;
+ buf[0].coeffs[33] = 0;
+ buf[1].coeffs[32] = 1;
+ buf[1].coeffs[33] = 0;
+ buf[2].coeffs[32] = 0;
+ buf[2].coeffs[33] = 1;
+ buf[3].coeffs[32] = 1;
+ buf[3].coeffs[33] = 1;
+ }
+
+ shake128x4_absorb_once(&state, buf[0].coeffs, buf[1].coeffs, buf[2].coeffs, buf[3].coeffs, 34);
+ shake128x4_squeezeblocks(buf[0].coeffs, buf[1].coeffs, buf[2].coeffs, buf[3].coeffs, S2N_KYBER_512_R3_REJ_UNIFORM_AVX_NBLOCKS, &state);
+
+ ctr0 = rej_uniform_avx2(a[0].vec[0].coeffs, buf[0].coeffs);
+ ctr1 = rej_uniform_avx2(a[0].vec[1].coeffs, buf[1].coeffs);
+ ctr2 = rej_uniform_avx2(a[1].vec[0].coeffs, buf[2].coeffs);
+ ctr3 = rej_uniform_avx2(a[1].vec[1].coeffs, buf[3].coeffs);
+
+ while(ctr0 < S2N_KYBER_512_R3_N || ctr1 < S2N_KYBER_512_R3_N || ctr2 < S2N_KYBER_512_R3_N || ctr3 < S2N_KYBER_512_R3_N) {
+ shake128x4_squeezeblocks(buf[0].coeffs, buf[1].coeffs, buf[2].coeffs, buf[3].coeffs, 1, &state);
+
+ ctr0 += rej_uniform(a[0].vec[0].coeffs + ctr0, S2N_KYBER_512_R3_N - ctr0, buf[0].coeffs, S2N_KYBER_512_R3_SHAKE128_RATE);
+ ctr1 += rej_uniform(a[0].vec[1].coeffs + ctr1, S2N_KYBER_512_R3_N - ctr1, buf[1].coeffs, S2N_KYBER_512_R3_SHAKE128_RATE);
+ ctr2 += rej_uniform(a[1].vec[0].coeffs + ctr2, S2N_KYBER_512_R3_N - ctr2, buf[2].coeffs, S2N_KYBER_512_R3_SHAKE128_RATE);
+ ctr3 += rej_uniform(a[1].vec[1].coeffs + ctr3, S2N_KYBER_512_R3_N - ctr3, buf[3].coeffs, S2N_KYBER_512_R3_SHAKE128_RATE);
+ }
+
+ poly_nttunpack_avx2(&a[0].vec[0]);
+ poly_nttunpack_avx2(&a[0].vec[1]);
+ poly_nttunpack_avx2(&a[1].vec[0]);
+ poly_nttunpack_avx2(&a[1].vec[1]);
+}
+
+/*************************************************
+* Name: indcpa_keypair_avx2
+*
+* Description: Generates public and private key for the CPA-secure
+* public-key encryption scheme underlying Kyber
+*
+* Arguments: - uint8_t *pk: pointer to output public key
+* (of length S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES bytes)
+* - uint8_t *sk: pointer to output private key
+* (of length S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES bytes)
+**************************************************/
+int indcpa_keypair_avx2(uint8_t pk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES],
+ uint8_t sk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES])
+{
+ unsigned int i;
+ uint8_t buf[2*S2N_KYBER_512_R3_SYMBYTES];
+ const uint8_t *publicseed = buf;
+ const uint8_t *noiseseed = buf + S2N_KYBER_512_R3_SYMBYTES;
+ polyvec a[S2N_KYBER_512_R3_K], e, pkpv, skpv;
+
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(buf, S2N_KYBER_512_R3_SYMBYTES));
+ sha3_512(buf, buf, S2N_KYBER_512_R3_SYMBYTES);
+
+ gen_a(a, publicseed);
+
+ poly_getnoise_eta1_4x(skpv.vec+0, skpv.vec+1, e.vec+0, e.vec+1, noiseseed, 0, 1, 2, 3);
+
+ polyvec_ntt_avx2(&skpv);
+ polyvec_reduce_avx2(&skpv);
+ polyvec_ntt_avx2(&e);
+
+ // matrix-vector multiplication
+ for(i=0;i<S2N_KYBER_512_R3_K;i++) {
+ polyvec_basemul_acc_montgomery_avx2(&pkpv.vec[i], &a[i], &skpv);
+ poly_tomont_avx2(&pkpv.vec[i]);
+ }
+
+ polyvec_add_avx2(&pkpv, &pkpv, &e);
+ polyvec_reduce_avx2(&pkpv);
+
+ pack_sk(sk, &skpv);
+ pack_pk(pk, &pkpv, publicseed);
+
+ return 0;
+}
+
+/*************************************************
+* Name: indcpa_enc_avx2
+*
+* Description: Encryption function of the CPA-secure
+* public-key encryption scheme underlying Kyber.
+*
+* Arguments: - uint8_t *c: pointer to output ciphertext
+* (of length S2N_KYBER_512_R3_INDCPA_BYTES bytes)
+* - const uint8_t *m: pointer to input message
+* (of length S2N_KYBER_512_R3_INDCPA_MSGBYTES bytes)
+* - const uint8_t *pk: pointer to input public key
+* (of length S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES)
+* - const uint8_t *coins: pointer to input random coins used as seed
+* (of length S2N_KYBER_512_R3_SYMBYTES) to deterministically
+* generate all randomness
+**************************************************/
+void indcpa_enc_avx2(uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES],
+ const uint8_t m[S2N_KYBER_512_R3_INDCPA_MSGBYTES],
+ const uint8_t pk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES],
+ const uint8_t coins[S2N_KYBER_512_R3_SYMBYTES])
+{
+ unsigned int i;
+ uint8_t seed[S2N_KYBER_512_R3_SYMBYTES];
+ polyvec sp, pkpv, ep, at[S2N_KYBER_512_R3_K], b;
+ poly v, k, epp;
+
+ unpack_pk(&pkpv, seed, pk);
+ poly_frommsg_avx2(&k, m);
+ gen_at(at, seed);
+
+ poly_getnoise_eta1122_4x(sp.vec+0, sp.vec+1, ep.vec+0, ep.vec+1, coins, 0, 1, 2, 3);
+ poly_getnoise_eta2_avx2(&epp, coins, 4);
+
+ polyvec_ntt_avx2(&sp);
+
+ // matrix-vector multiplication
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ polyvec_basemul_acc_montgomery_avx2(&b.vec[i], &at[i], &sp);
+ polyvec_basemul_acc_montgomery_avx2(&v, &pkpv, &sp);
+
+ polyvec_invntt_tomont_avx2(&b);
+ poly_invntt_tomont_avx2(&v);
+
+ polyvec_add_avx2(&b, &b, &ep);
+ poly_add_avx2(&v, &v, &epp);
+ poly_add_avx2(&v, &v, &k);
+ polyvec_reduce_avx2(&b);
+ poly_reduce_avx2(&v);
+
+ pack_ciphertext(c, &b, &v);
+}
+
+/*************************************************
+* Name: indcpa_dec_avx2
+*
+* Description: Decryption function of the CPA-secure
+* public-key encryption scheme underlying Kyber.
+*
+* Arguments: - uint8_t *m: pointer to output decrypted message
+* (of length S2N_KYBER_512_R3_INDCPA_MSGBYTES)
+* - const uint8_t *c: pointer to input ciphertext
+* (of length S2N_KYBER_512_R3_INDCPA_BYTES)
+* - const uint8_t *sk: pointer to input secret key
+* (of length S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES)
+**************************************************/
+void indcpa_dec_avx2(uint8_t m[S2N_KYBER_512_R3_INDCPA_MSGBYTES],
+ const uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES],
+ const uint8_t sk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES])
+{
+ polyvec b, skpv;
+ poly v, mp;
+
+ unpack_ciphertext(&b, &v, c);
+ unpack_sk(&skpv, sk);
+
+ polyvec_ntt_avx2(&b);
+ polyvec_basemul_acc_montgomery_avx2(&mp, &skpv, &b);
+ poly_invntt_tomont_avx2(&mp);
+
+ poly_sub_avx2(&mp, &v, &mp);
+ poly_reduce_avx2(&mp);
+
+ poly_tomsg_avx2(m, &mp);
+}
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.h
new file mode 100644
index 0000000000..127e5bc4f6
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_indcpa_avx2.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_polyvec_avx2.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#define gen_matrix_avx2 S2N_KYBER_512_R3_NAMESPACE(gen_matrix_avx2)
+void gen_matrix_avx2(polyvec *a, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], int transposed);
+
+#define indcpa_keypair_avx2 S2N_KYBER_512_R3_NAMESPACE(indcpa_keypair_avx2)
+int indcpa_keypair_avx2(uint8_t pk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES],
+ uint8_t sk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES]);
+
+#define indcpa_enc_avx2 S2N_KYBER_512_R3_NAMESPACE(indcpa_enc_avx2)
+void indcpa_enc_avx2(uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES],
+ const uint8_t m[S2N_KYBER_512_R3_INDCPA_MSGBYTES],
+ const uint8_t pk[S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES],
+ const uint8_t coins[S2N_KYBER_512_R3_SYMBYTES]);
+
+#define indcpa_dec_avx2 S2N_KYBER_512_R3_NAMESPACE(indcpa_dec_avx2)
+void indcpa_dec_avx2(uint8_t m[S2N_KYBER_512_R3_INDCPA_MSGBYTES],
+ const uint8_t c[S2N_KYBER_512_R3_INDCPA_BYTES],
+ const uint8_t sk[S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES]);
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_invntt_avx2.S b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_invntt_avx2.S
new file mode 100644
index 0000000000..8f131668ff
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_invntt_avx2.S
@@ -0,0 +1,255 @@
+#include "kyber512r3_consts_avx2.h"
+
+// The small macros (.inc files) are combined with .S files directly
+/*****.include "shuffle.inc"*****/
+/********************************/
+.macro shuffle8 r0,r1,r2,r3
+vperm2i128 $0x20,%ymm\r1,%ymm\r0,%ymm\r2
+vperm2i128 $0x31,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle4 r0,r1,r2,r3
+vpunpcklqdq %ymm\r1,%ymm\r0,%ymm\r2
+vpunpckhqdq %ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle2 r0,r1,r2,r3
+#vpsllq $32,%ymm\r1,%ymm\r2
+vmovsldup %ymm\r1,%ymm\r2
+vpblendd $0xAA,%ymm\r2,%ymm\r0,%ymm\r2
+vpsrlq $32,%ymm\r0,%ymm\r0
+#vmovshdup %ymm\r0,%ymm\r0
+vpblendd $0xAA,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle1 r0,r1,r2,r3
+vpslld $16,%ymm\r1,%ymm\r2
+vpblendw $0xAA,%ymm\r2,%ymm\r0,%ymm\r2
+vpsrld $16,%ymm\r0,%ymm\r0
+vpblendw $0xAA,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+/********************************/
+
+/*****.include "fq.inc"*****/
+/***************************/
+.macro red16 r,rs=0,x=12
+vpmulhw %ymm1,%ymm\r,%ymm\x
+.if \rs
+vpmulhrsw %ymm\rs,%ymm\x,%ymm\x
+.else
+vpsraw $10,%ymm\x,%ymm\x
+.endif
+vpmullw %ymm0,%ymm\x,%ymm\x
+vpsubw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro csubq r,x=12
+vpsubw %ymm0,%ymm\r,%ymm\r
+vpsraw $15,%ymm\r,%ymm\x
+vpand %ymm0,%ymm\x,%ymm\x
+vpaddw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro caddq r,x=12
+vpsraw $15,%ymm\r,%ymm\x
+vpand %ymm0,%ymm\x,%ymm\x
+vpaddw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro fqmulprecomp al,ah,b,x=12
+vpmullw %ymm\al,%ymm\b,%ymm\x
+vpmulhw %ymm\ah,%ymm\b,%ymm\b
+vpmulhw %ymm0,%ymm\x,%ymm\x
+vpsubw %ymm\x,%ymm\b,%ymm\b
+.endm
+/***************************/
+
+.macro butterfly rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3,zl0=2,zl1=2,zh0=3,zh1=3
+vpsubw %ymm\rl0,%ymm\rh0,%ymm12
+vpaddw %ymm\rh0,%ymm\rl0,%ymm\rl0
+vpsubw %ymm\rl1,%ymm\rh1,%ymm13
+
+vpmullw %ymm\zl0,%ymm12,%ymm\rh0
+vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl1
+vpsubw %ymm\rl2,%ymm\rh2,%ymm14
+
+vpmullw %ymm\zl0,%ymm13,%ymm\rh1
+vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl2
+vpsubw %ymm\rl3,%ymm\rh3,%ymm15
+
+vpmullw %ymm\zl1,%ymm14,%ymm\rh2
+vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl3
+vpmullw %ymm\zl1,%ymm15,%ymm\rh3
+
+vpmulhw %ymm\zh0,%ymm12,%ymm12
+vpmulhw %ymm\zh0,%ymm13,%ymm13
+
+vpmulhw %ymm\zh1,%ymm14,%ymm14
+vpmulhw %ymm\zh1,%ymm15,%ymm15
+
+vpmulhw %ymm0,%ymm\rh0,%ymm\rh0
+
+vpmulhw %ymm0,%ymm\rh1,%ymm\rh1
+
+vpmulhw %ymm0,%ymm\rh2,%ymm\rh2
+vpmulhw %ymm0,%ymm\rh3,%ymm\rh3
+
+#
+
+#
+
+vpsubw %ymm\rh0,%ymm12,%ymm\rh0
+
+vpsubw %ymm\rh1,%ymm13,%ymm\rh1
+
+vpsubw %ymm\rh2,%ymm14,%ymm\rh2
+vpsubw %ymm\rh3,%ymm15,%ymm\rh3
+.endm
+
+.macro intt_levels0t5 off
+/* level 0 */
+vmovdqa _16XFLO*2(%rsi),%ymm2
+vmovdqa _16XFHI*2(%rsi),%ymm3
+
+vmovdqa (128*\off+ 0)*2(%rdi),%ymm4
+vmovdqa (128*\off+ 32)*2(%rdi),%ymm6
+vmovdqa (128*\off+ 16)*2(%rdi),%ymm5
+vmovdqa (128*\off+ 48)*2(%rdi),%ymm7
+
+fqmulprecomp 2,3,4
+fqmulprecomp 2,3,6
+fqmulprecomp 2,3,5
+fqmulprecomp 2,3,7
+
+vmovdqa (128*\off+ 64)*2(%rdi),%ymm8
+vmovdqa (128*\off+ 96)*2(%rdi),%ymm10
+vmovdqa (128*\off+ 80)*2(%rdi),%ymm9
+vmovdqa (128*\off+112)*2(%rdi),%ymm11
+
+fqmulprecomp 2,3,8
+fqmulprecomp 2,3,10
+fqmulprecomp 2,3,9
+fqmulprecomp 2,3,11
+
+vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+208)*2(%rsi),%ymm15
+vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+176)*2(%rsi),%ymm1
+vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+224)*2(%rsi),%ymm2
+vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+192)*2(%rsi),%ymm3
+vmovdqa _REVIDXB*2(%rsi),%ymm12
+vpshufb %ymm12,%ymm15,%ymm15
+vpshufb %ymm12,%ymm1,%ymm1
+vpshufb %ymm12,%ymm2,%ymm2
+vpshufb %ymm12,%ymm3,%ymm3
+
+butterfly 4,5,8,9,6,7,10,11,15,1,2,3
+
+/* level 1 */
+vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+144)*2(%rsi),%ymm2
+vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+160)*2(%rsi),%ymm3
+vmovdqa _REVIDXB*2(%rsi),%ymm1
+vpshufb %ymm1,%ymm2,%ymm2
+vpshufb %ymm1,%ymm3,%ymm3
+
+butterfly 4,5,6,7,8,9,10,11,2,2,3,3
+
+shuffle1 4,5,3,5
+shuffle1 6,7,4,7
+shuffle1 8,9,6,9
+shuffle1 10,11,8,11
+
+/* level 2 */
+vmovdqa _REVIDXD*2(%rsi),%ymm12
+vpermd (_ZETAS_EXP+(1-\off)*224+112)*2(%rsi),%ymm12,%ymm2
+vpermd (_ZETAS_EXP+(1-\off)*224+128)*2(%rsi),%ymm12,%ymm10
+
+butterfly 3,4,6,8,5,7,9,11,2,2,10,10
+
+vmovdqa _16XV*2(%rsi),%ymm1
+red16 3
+
+shuffle2 3,4,10,4
+shuffle2 6,8,3,8
+shuffle2 5,7,6,7
+shuffle2 9,11,5,11
+
+/* level 3 */
+vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+80)*2(%rsi),%ymm2
+vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+96)*2(%rsi),%ymm9
+
+butterfly 10,3,6,5,4,8,7,11,2,2,9,9
+
+shuffle4 10,3,9,3
+shuffle4 6,5,10,5
+shuffle4 4,8,6,8
+shuffle4 7,11,4,11
+
+/* level 4 */
+vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+48)*2(%rsi),%ymm2
+vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+64)*2(%rsi),%ymm7
+
+butterfly 9,10,6,4,3,5,8,11,2,2,7,7
+
+red16 9
+
+shuffle8 9,10,7,10
+shuffle8 6,4,9,4
+shuffle8 3,5,6,5
+shuffle8 8,11,3,11
+
+/* level 5 */
+vmovdqa (_ZETAS_EXP+(1-\off)*224+16)*2(%rsi),%ymm2
+vmovdqa (_ZETAS_EXP+(1-\off)*224+32)*2(%rsi),%ymm8
+
+butterfly 7,9,6,3,10,4,5,11,2,2,8,8
+
+vmovdqa %ymm7,(128*\off+ 0)*2(%rdi)
+vmovdqa %ymm9,(128*\off+ 16)*2(%rdi)
+vmovdqa %ymm6,(128*\off+ 32)*2(%rdi)
+vmovdqa %ymm3,(128*\off+ 48)*2(%rdi)
+vmovdqa %ymm10,(128*\off+ 64)*2(%rdi)
+vmovdqa %ymm4,(128*\off+ 80)*2(%rdi)
+vmovdqa %ymm5,(128*\off+ 96)*2(%rdi)
+vmovdqa %ymm11,(128*\off+112)*2(%rdi)
+.endm
+
+.macro intt_level6 off
+/* level 6 */
+vmovdqa (64*\off+ 0)*2(%rdi),%ymm4
+vmovdqa (64*\off+128)*2(%rdi),%ymm8
+vmovdqa (64*\off+ 16)*2(%rdi),%ymm5
+vmovdqa (64*\off+144)*2(%rdi),%ymm9
+vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm2
+
+vmovdqa (64*\off+ 32)*2(%rdi),%ymm6
+vmovdqa (64*\off+160)*2(%rdi),%ymm10
+vmovdqa (64*\off+ 48)*2(%rdi),%ymm7
+vmovdqa (64*\off+176)*2(%rdi),%ymm11
+vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm3
+
+butterfly 4,5,6,7,8,9,10,11
+
+.if \off == 0
+red16 4
+.endif
+
+vmovdqa %ymm4,(64*\off+ 0)*2(%rdi)
+vmovdqa %ymm5,(64*\off+ 16)*2(%rdi)
+vmovdqa %ymm6,(64*\off+ 32)*2(%rdi)
+vmovdqa %ymm7,(64*\off+ 48)*2(%rdi)
+vmovdqa %ymm8,(64*\off+128)*2(%rdi)
+vmovdqa %ymm9,(64*\off+144)*2(%rdi)
+vmovdqa %ymm10,(64*\off+160)*2(%rdi)
+vmovdqa %ymm11,(64*\off+176)*2(%rdi)
+.endm
+
+.text
+.global cdecl(invntt_avx2_asm)
+cdecl(invntt_avx2_asm):
+vmovdqa _16XQ*2(%rsi),%ymm0
+
+intt_levels0t5 0
+intt_levels0t5 1
+
+intt_level6 0
+intt_level6 1
+ret
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_kem.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_kem.c
new file mode 100644
index 0000000000..9d6c49b9c4
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_kem.c
@@ -0,0 +1,158 @@
+#include <stddef.h>
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_symmetric.h"
+#include "kyber512r3_indcpa.h"
+#include "kyber512r3_indcpa_avx2.h"
+#include "tls/s2n_kem.h"
+#include "utils/s2n_safety.h"
+#include "pq-crypto/s2n_pq_random.h"
+#include "pq-crypto/s2n_pq.h"
+
+/*************************************************
+* Name: crypto_kem_keypair
+*
+* Description: Generates public and private key
+* for CCA-secure Kyber key encapsulation mechanism
+*
+* Arguments: - unsigned char *pk: pointer to output public key
+* (an already allocated array of S2N_KYBER_512_R3_PUBLIC_KEY_BYTES bytes)
+* - unsigned char *sk: pointer to output private key
+* (an already allocated array of S2N_KYBER_512_R3_SECRET_KEY_BYTES bytes)
+*
+* Returns 0 (success)
+**************************************************/
+int s2n_kyber_512_r3_crypto_kem_keypair(unsigned char *pk, unsigned char *sk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+ if (s2n_kyber512r3_is_avx2_bmi2_enabled()) {
+ POSIX_GUARD(indcpa_keypair_avx2(pk, sk));
+ } else
+#endif
+ {
+ POSIX_GUARD(indcpa_keypair(pk, sk));
+ }
+
+ for(size_t i = 0; i < S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES; i++) {
+ sk[i + S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES] = pk[i];
+ }
+ sha3_256(sk+S2N_KYBER_512_R3_SECRET_KEY_BYTES-2*S2N_KYBER_512_R3_SYMBYTES, pk, S2N_KYBER_512_R3_PUBLIC_KEY_BYTES);
+ /* Value z for pseudo-random output on reject */
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(sk+S2N_KYBER_512_R3_SECRET_KEY_BYTES-S2N_KYBER_512_R3_SYMBYTES, S2N_KYBER_512_R3_SYMBYTES));
+ return S2N_SUCCESS;
+}
+
+/*************************************************
+* Name: crypto_kem_enc
+*
+* Description: Generates cipher text and shared
+* secret for given public key
+*
+* Arguments: - unsigned char *ct: pointer to output cipher text
+* (an already allocated array of S2N_KYBER_512_R3_CIPHERTEXT_BYTES bytes)
+* - unsigned char *ss: pointer to output shared secret
+* (an already allocated array of S2N_KYBER_512_R3_SHARED_SECRET_BYTES bytes)
+* - const unsigned char *pk: pointer to input public key
+* (an already allocated array of S2N_KYBER_512_R3_PUBLIC_KEY_BYTES bytes)
+*
+* Returns 0 (success)
+**************************************************/
+int s2n_kyber_512_r3_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsigned char *pk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ uint8_t buf[2*S2N_KYBER_512_R3_SYMBYTES];
+ /* Will contain key, coins */
+ uint8_t kr[2*S2N_KYBER_512_R3_SYMBYTES];
+
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(buf, S2N_KYBER_512_R3_SYMBYTES));
+ /* Don't release system RNG output */
+ sha3_256(buf, buf, S2N_KYBER_512_R3_SYMBYTES);
+
+ /* Multitarget countermeasure for coins + contributory KEM */
+ sha3_256(buf+S2N_KYBER_512_R3_SYMBYTES, pk, S2N_KYBER_512_R3_PUBLIC_KEY_BYTES);
+ sha3_512(kr, buf, 2*S2N_KYBER_512_R3_SYMBYTES);
+
+ /* coins are in kr+S2N_KYBER_512_R3_SYMBYTES */
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+ if (s2n_kyber512r3_is_avx2_bmi2_enabled()) {
+ indcpa_enc_avx2(ct, buf, pk, kr+S2N_KYBER_512_R3_SYMBYTES);
+ } else
+#endif
+ {
+ indcpa_enc(ct, buf, pk, kr+S2N_KYBER_512_R3_SYMBYTES);
+ }
+
+ /* overwrite coins in kr with H(c) */
+ sha3_256(kr+S2N_KYBER_512_R3_SYMBYTES, ct, S2N_KYBER_512_R3_CIPHERTEXT_BYTES);
+ /* hash concatenation of pre-k and H(c) to k */
+ shake256(ss, S2N_KYBER_512_R3_SSBYTES, kr, 2*S2N_KYBER_512_R3_SYMBYTES);
+ return S2N_SUCCESS;
+}
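+
+/* Background note (from the round-3 Kyber specification; not upstream text): the hash calls
+ * above instantiate H = SHA3-256, G = SHA3-512 and KDF = SHAKE-256. Hashing the public key
+ * into the coins is the multi-target countermeasure, so a single precomputed ciphertext
+ * cannot be replayed against many different public keys. */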
+
+/*************************************************
+* Name: crypto_kem_dec
+*
+* Description: Generates shared secret for given
+* cipher text and private key
+*
+* Arguments: - unsigned char *ss: pointer to output shared secret
+* (an already allocated array of S2N_KYBER_512_R3_SHARED_SECRET_BYTES bytes)
+* - const unsigned char *ct: pointer to input cipher text
+* (an already allocated array of S2N_KYBER_512_R3_CIPHERTEXT_BYTES bytes)
+* - const unsigned char *sk: pointer to input private key
+* (an already allocated array of S2N_KYBER_512_R3_SECRET_KEY_BYTES bytes)
+*
+* Returns 0.
+*
+* On failure, ss will contain a pseudo-random value.
+**************************************************/
+int s2n_kyber_512_r3_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, const unsigned char *sk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ uint8_t buf[2*S2N_KYBER_512_R3_SYMBYTES];
+ /* Will contain key, coins */
+ uint8_t kr[2*S2N_KYBER_512_R3_SYMBYTES];
+ uint8_t cmp[S2N_KYBER_512_R3_CIPHERTEXT_BYTES];
+ const uint8_t *pk = sk+S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES;
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+ if (s2n_kyber512r3_is_avx2_bmi2_enabled()) {
+ indcpa_dec_avx2(buf, ct, sk);
+ } else
+#endif
+ {
+ indcpa_dec(buf, ct, sk);
+ }
+
+ /* Multitarget countermeasure for coins + contributory KEM */
+ for(size_t i = 0; i < S2N_KYBER_512_R3_SYMBYTES; i++) {
+ buf[S2N_KYBER_512_R3_SYMBYTES + i] = sk[S2N_KYBER_512_R3_SECRET_KEY_BYTES - 2 * S2N_KYBER_512_R3_SYMBYTES + i];
+ }
+ sha3_512(kr, buf, 2*S2N_KYBER_512_R3_SYMBYTES);
+
+ /* coins are in kr+S2N_KYBER_512_R3_SYMBYTES */
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+ if (s2n_kyber512r3_is_avx2_bmi2_enabled()) {
+ indcpa_enc_avx2(cmp, buf, pk, kr+S2N_KYBER_512_R3_SYMBYTES);
+ } else
+#endif
+ {
+ indcpa_enc(cmp, buf, pk, kr+S2N_KYBER_512_R3_SYMBYTES);
+ }
+
+ /* If ct and cmp are equal (dont_copy = 1), decryption has succeeded and we do NOT overwrite pre-k below.
+ * If ct and cmp are not equal (dont_copy = 0), decryption fails and we do overwrite pre-k. */
+ int dont_copy = s2n_constant_time_equals(ct, cmp, S2N_KYBER_512_R3_CIPHERTEXT_BYTES);
+
+ /* overwrite coins in kr with H(c) */
+ sha3_256(kr+S2N_KYBER_512_R3_SYMBYTES, ct, S2N_KYBER_512_R3_CIPHERTEXT_BYTES);
+
+ /* Overwrite pre-k with z on re-encryption failure */
+ POSIX_GUARD(s2n_constant_time_copy_or_dont(kr, sk+S2N_KYBER_512_R3_SECRET_KEY_BYTES-S2N_KYBER_512_R3_SYMBYTES,
+ S2N_KYBER_512_R3_SYMBYTES, dont_copy));
+
+ /* hash concatenation of pre-k and H(c) to k */
+ shake256(ss, S2N_KYBER_512_R3_SSBYTES, kr, 2*S2N_KYBER_512_R3_SYMBYTES);
+ return S2N_SUCCESS;
+}
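+
+/* Descriptive note (behaviour inferred from the call site above; the helper itself is
+ * declared in utils/s2n_safety.h and is not shown here): s2n_constant_time_copy_or_dont
+ * copies the given number of bytes from src to dest when dont == 0 and leaves dest
+ * untouched otherwise, without secret-dependent branching. That implements Kyber's
+ * implicit rejection: on re-encryption failure the shared secret is derived from z
+ * instead of pre-k. */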
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.c
new file mode 100644
index 0000000000..6c82105c19
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.c
@@ -0,0 +1,122 @@
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_ntt.h"
+#include "kyber512r3_reduce.h"
+
+const int16_t zetas[128] = {
+ 2285, 2571, 2970, 1812, 1493, 1422, 287, 202, 3158, 622, 1577, 182, 962,
+ 2127, 1855, 1468, 573, 2004, 264, 383, 2500, 1458, 1727, 3199, 2648, 1017,
+ 732, 608, 1787, 411, 3124, 1758, 1223, 652, 2777, 1015, 2036, 1491, 3047,
+ 1785, 516, 3321, 3009, 2663, 1711, 2167, 126, 1469, 2476, 3239, 3058, 830,
+ 107, 1908, 3082, 2378, 2931, 961, 1821, 2604, 448, 2264, 677, 2054, 2226,
+ 430, 555, 843, 2078, 871, 1550, 105, 422, 587, 177, 3094, 3038, 2869, 1574,
+ 1653, 3083, 778, 1159, 3182, 2552, 1483, 2727, 1119, 1739, 644, 2457, 349,
+ 418, 329, 3173, 3254, 817, 1097, 603, 610, 1322, 2044, 1864, 384, 2114, 3193,
+ 1218, 1994, 2455, 220, 2142, 1670, 2144, 1799, 2051, 794, 1819, 2475, 2459,
+ 478, 3221, 3021, 996, 991, 958, 1869, 1522, 1628
+};
+
+const int16_t zetas_inv[128] = {
+ 1701, 1807, 1460, 2371, 2338, 2333, 308, 108, 2851, 870, 854, 1510, 2535,
+ 1278, 1530, 1185, 1659, 1187, 3109, 874, 1335, 2111, 136, 1215, 2945, 1465,
+ 1285, 2007, 2719, 2726, 2232, 2512, 75, 156, 3000, 2911, 2980, 872, 2685,
+ 1590, 2210, 602, 1846, 777, 147, 2170, 2551, 246, 1676, 1755, 460, 291, 235,
+ 3152, 2742, 2907, 3224, 1779, 2458, 1251, 2486, 2774, 2899, 1103, 1275, 2652,
+ 1065, 2881, 725, 1508, 2368, 398, 951, 247, 1421, 3222, 2499, 271, 90, 853,
+ 1860, 3203, 1162, 1618, 666, 320, 8, 2813, 1544, 282, 1838, 1293, 2314, 552,
+ 2677, 2106, 1571, 205, 2918, 1542, 2721, 2597, 2312, 681, 130, 1602, 1871,
+ 829, 2946, 3065, 1325, 2756, 1861, 1474, 1202, 2367, 3147, 1752, 2707, 171,
+ 3127, 3042, 1907, 1836, 1517, 359, 758, 1441
+};
+
+/*************************************************
+* Name: fqmul
+*
+* Description: Multiplication followed by Montgomery reduction
+*
+* Arguments: - int16_t a: first factor
+* - int16_t b: second factor
+*
+* Returns 16-bit integer congruent to a*b*R^{-1} mod q
+**************************************************/
+static int16_t fqmul(int16_t a, int16_t b) {
+ return montgomery_reduce((int32_t)a * b);
+}
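+
+/* Illustrative sketch (added for clarity; montgomery_reduce is declared in
+ * kyber512r3_reduce.h and its definition is not shown here): with R = 2^16, q = 3329 and
+ * QINV = 62209 = q^-1 mod 2^16, the reduction behaves roughly like
+ *
+ *   int16_t montgomery_reduce_sketch(int32_t a) {
+ *       int16_t u = (int16_t)((int16_t)a * 62209);   // a * q^-1 mod 2^16
+ *       int32_t t = a - (int32_t)u * 3329;           // now divisible by 2^16
+ *       return (int16_t)(t >> 16);                   // congruent to a * R^-1 mod q
+ *   }
+ */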
+
+/*************************************************
+* Name: ntt
+*
+* Description: In-place number-theoretic transform (NTT) in Rq
+* input is in standard order, output is in bitreversed order
+*
+* Arguments: - int16_t r[256]: pointer to input/output vector of elements
+* of Zq
+**************************************************/
+void ntt(int16_t r[256]) {
+ unsigned int len, start, j, k;
+ int16_t t, zeta;
+
+ k = 1;
+ for (len = 128; len >= 2; len >>= 1) {
+ for (start = 0; start < 256; start = j + len) {
+ zeta = zetas[k++];
+ for (j = start; j < start + len; ++j) {
+ t = fqmul(zeta, r[j + len]);
+ r[j + len] = r[j] - t;
+ r[j] = r[j] + t;
+ }
+ }
+ }
+}
+
+/*************************************************
+* Name: invntt_tomont
+*
+* Description: In-place inverse number-theoretic transform in Rq and
+* multiplication by Montgomery factor 2^16.
+* Input is in bitreversed order, output is in standard order
+*
+* Arguments: - int16_t r[256]: pointer to input/output vector of elements
+* of Zq
+**************************************************/
+void invntt(int16_t r[256]) {
+ unsigned int start, len, j, k;
+ int16_t t, zeta;
+
+ k = 0;
+ for (len = 2; len <= 128; len <<= 1) {
+ for (start = 0; start < 256; start = j + len) {
+ zeta = zetas_inv[k++];
+ for (j = start; j < start + len; ++j) {
+ t = r[j];
+ r[j] = barrett_reduce(t + r[j + len]);
+ r[j + len] = t - r[j + len];
+ r[j + len] = fqmul(zeta, r[j + len]);
+ }
+ }
+ }
+
+ for (j = 0; j < 256; ++j) {
+ r[j] = fqmul(r[j], zetas_inv[127]);
+ }
+}
+
+/*************************************************
+* Name: basemul
+*
+* Description: Multiplication of polynomials in Zq[X]/(X^2-zeta)
+* used for multiplication of elements in Rq in NTT domain
+*
+* Arguments: - int16_t r[2]: pointer to the output polynomial
+* - const int16_t a[2]: pointer to the first factor
+* - const int16_t b[2]: pointer to the second factor
+* - int16_t zeta: integer defining the reduction polynomial
+**************************************************/
+void basemul(int16_t r[2], const int16_t a[2], const int16_t b[2], int16_t zeta) {
+ r[0] = fqmul(a[1], b[1]);
+ r[0] = fqmul(r[0], zeta);
+ r[0] += fqmul(a[0], b[0]);
+
+ r[1] = fqmul(a[0], b[1]);
+ r[1] += fqmul(a[1], b[0]);
+}
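+
+/* Usage sketch (added for clarity; the actual driver lives in the poly/polyvec code, not
+ * shown here): a full NTT-domain multiplication of two 256-coefficient polynomials pairs
+ * coefficients two at a time and alternates the sign of zeta, roughly:
+ *
+ *   ntt(a); ntt(b);
+ *   for (unsigned int i = 0; i < 64; i++) {
+ *       basemul(&r[4*i],     &a[4*i],     &b[4*i],      zetas[64 + i]);
+ *       basemul(&r[4*i + 2], &a[4*i + 2], &b[4*i + 2], -zetas[64 + i]);
+ *   }
+ *   invntt(r);   // output still carries a Montgomery factor that the poly layer accounts for
+ */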
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.h
new file mode 100644
index 0000000000..98d6235764
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+
+#define zetas S2N_KYBER_512_R3_NAMESPACE(zetas)
+extern const int16_t zetas[128];
+
+#define zetas_inv S2N_KYBER_512_R3_NAMESPACE(zetas_inv)
+extern const int16_t zetas_inv[128];
+
+#define ntt S2N_KYBER_512_R3_NAMESPACE(ntt)
+void ntt(int16_t poly[256]);
+
+#define invntt S2N_KYBER_512_R3_NAMESPACE(invntt)
+void invntt(int16_t poly[256]);
+
+#define basemul S2N_KYBER_512_R3_NAMESPACE(basemul)
+void basemul(int16_t r[2], const int16_t a[2], const int16_t b[2], int16_t zeta);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.S b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.S
new file mode 100644
index 0000000000..dc80086cb1
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.S
@@ -0,0 +1,218 @@
+#include "kyber512r3_consts_avx2.h"
+
+// The small macros (.inc files) are combined with .S files directly
+/*****.include "shuffle.inc"*****/
+/********************************/
+.macro shuffle8 r0,r1,r2,r3
+vperm2i128 $0x20,%ymm\r1,%ymm\r0,%ymm\r2
+vperm2i128 $0x31,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle4 r0,r1,r2,r3
+vpunpcklqdq %ymm\r1,%ymm\r0,%ymm\r2
+vpunpckhqdq %ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle2 r0,r1,r2,r3
+#vpsllq $32,%ymm\r1,%ymm\r2
+vmovsldup %ymm\r1,%ymm\r2
+vpblendd $0xAA,%ymm\r2,%ymm\r0,%ymm\r2
+vpsrlq $32,%ymm\r0,%ymm\r0
+#vmovshdup %ymm\r0,%ymm\r0
+vpblendd $0xAA,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle1 r0,r1,r2,r3
+vpslld $16,%ymm\r1,%ymm\r2
+vpblendw $0xAA,%ymm\r2,%ymm\r0,%ymm\r2
+vpsrld $16,%ymm\r0,%ymm\r0
+vpblendw $0xAA,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+/********************************/
+
+.macro mul rh0,rh1,rh2,rh3,zl0=15,zl1=15,zh0=2,zh1=2
+vpmullw %ymm\zl0,%ymm\rh0,%ymm12
+vpmullw %ymm\zl0,%ymm\rh1,%ymm13
+
+vpmullw %ymm\zl1,%ymm\rh2,%ymm14
+vpmullw %ymm\zl1,%ymm\rh3,%ymm15
+
+vpmulhw %ymm\zh0,%ymm\rh0,%ymm\rh0
+vpmulhw %ymm\zh0,%ymm\rh1,%ymm\rh1
+
+vpmulhw %ymm\zh1,%ymm\rh2,%ymm\rh2
+vpmulhw %ymm\zh1,%ymm\rh3,%ymm\rh3
+.endm
+
+.macro reduce
+vpmulhw %ymm0,%ymm12,%ymm12
+vpmulhw %ymm0,%ymm13,%ymm13
+
+vpmulhw %ymm0,%ymm14,%ymm14
+vpmulhw %ymm0,%ymm15,%ymm15
+.endm
+
+.macro update rln,rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3
+vpaddw %ymm\rh0,%ymm\rl0,%ymm\rln
+vpsubw %ymm\rh0,%ymm\rl0,%ymm\rh0
+vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl0
+
+vpsubw %ymm\rh1,%ymm\rl1,%ymm\rh1
+vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl1
+vpsubw %ymm\rh2,%ymm\rl2,%ymm\rh2
+
+vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl2
+vpsubw %ymm\rh3,%ymm\rl3,%ymm\rh3
+
+vpsubw %ymm12,%ymm\rln,%ymm\rln
+vpaddw %ymm12,%ymm\rh0,%ymm\rh0
+vpsubw %ymm13,%ymm\rl0,%ymm\rl0
+
+vpaddw %ymm13,%ymm\rh1,%ymm\rh1
+vpsubw %ymm14,%ymm\rl1,%ymm\rl1
+vpaddw %ymm14,%ymm\rh2,%ymm\rh2
+
+vpsubw %ymm15,%ymm\rl2,%ymm\rl2
+vpaddw %ymm15,%ymm\rh3,%ymm\rh3
+.endm
+
+.macro level0 off
+vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm15
+vmovdqa (64*\off+128)*2(%rdi),%ymm8
+vmovdqa (64*\off+144)*2(%rdi),%ymm9
+vmovdqa (64*\off+160)*2(%rdi),%ymm10
+vmovdqa (64*\off+176)*2(%rdi),%ymm11
+vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm2
+
+mul 8,9,10,11
+
+vmovdqa (64*\off+ 0)*2(%rdi),%ymm4
+vmovdqa (64*\off+ 16)*2(%rdi),%ymm5
+vmovdqa (64*\off+ 32)*2(%rdi),%ymm6
+vmovdqa (64*\off+ 48)*2(%rdi),%ymm7
+
+reduce
+update 3,4,5,6,7,8,9,10,11
+
+vmovdqa %ymm3,(64*\off+ 0)*2(%rdi)
+vmovdqa %ymm4,(64*\off+ 16)*2(%rdi)
+vmovdqa %ymm5,(64*\off+ 32)*2(%rdi)
+vmovdqa %ymm6,(64*\off+ 48)*2(%rdi)
+vmovdqa %ymm8,(64*\off+128)*2(%rdi)
+vmovdqa %ymm9,(64*\off+144)*2(%rdi)
+vmovdqa %ymm10,(64*\off+160)*2(%rdi)
+vmovdqa %ymm11,(64*\off+176)*2(%rdi)
+.endm
+
+.macro levels1t6 off
+/* level 1 */
+vmovdqa (_ZETAS_EXP+224*\off+16)*2(%rsi),%ymm15
+vmovdqa (128*\off+ 64)*2(%rdi),%ymm8
+vmovdqa (128*\off+ 80)*2(%rdi),%ymm9
+vmovdqa (128*\off+ 96)*2(%rdi),%ymm10
+vmovdqa (128*\off+112)*2(%rdi),%ymm11
+vmovdqa (_ZETAS_EXP+224*\off+32)*2(%rsi),%ymm2
+
+mul 8,9,10,11
+
+vmovdqa (128*\off+ 0)*2(%rdi),%ymm4
+vmovdqa (128*\off+ 16)*2(%rdi),%ymm5
+vmovdqa (128*\off+ 32)*2(%rdi),%ymm6
+vmovdqa (128*\off+ 48)*2(%rdi),%ymm7
+
+reduce
+update 3,4,5,6,7,8,9,10,11
+
+/* level 2 */
+shuffle8 5,10,7,10
+shuffle8 6,11,5,11
+
+vmovdqa (_ZETAS_EXP+224*\off+48)*2(%rsi),%ymm15
+vmovdqa (_ZETAS_EXP+224*\off+64)*2(%rsi),%ymm2
+
+mul 7,10,5,11
+
+shuffle8 3,8,6,8
+shuffle8 4,9,3,9
+
+reduce
+update 4,6,8,3,9,7,10,5,11
+
+/* level 3 */
+shuffle4 8,5,9,5
+shuffle4 3,11,8,11
+
+vmovdqa (_ZETAS_EXP+224*\off+80)*2(%rsi),%ymm15
+vmovdqa (_ZETAS_EXP+224*\off+96)*2(%rsi),%ymm2
+
+mul 9,5,8,11
+
+shuffle4 4,7,3,7
+shuffle4 6,10,4,10
+
+reduce
+update 6,3,7,4,10,9,5,8,11
+
+/* level 4 */
+shuffle2 7,8,10,8
+shuffle2 4,11,7,11
+
+vmovdqa (_ZETAS_EXP+224*\off+112)*2(%rsi),%ymm15
+vmovdqa (_ZETAS_EXP+224*\off+128)*2(%rsi),%ymm2
+
+mul 10,8,7,11
+
+shuffle2 6,9,4,9
+shuffle2 3,5,6,5
+
+reduce
+update 3,4,9,6,5,10,8,7,11
+
+/* level 5 */
+shuffle1 9,7,5,7
+shuffle1 6,11,9,11
+
+vmovdqa (_ZETAS_EXP+224*\off+144)*2(%rsi),%ymm15
+vmovdqa (_ZETAS_EXP+224*\off+160)*2(%rsi),%ymm2
+
+mul 5,7,9,11
+
+shuffle1 3,10,6,10
+shuffle1 4,8,3,8
+
+reduce
+update 4,6,10,3,8,5,7,9,11
+
+/* level 6 */
+vmovdqa (_ZETAS_EXP+224*\off+176)*2(%rsi),%ymm14
+vmovdqa (_ZETAS_EXP+224*\off+208)*2(%rsi),%ymm15
+vmovdqa (_ZETAS_EXP+224*\off+192)*2(%rsi),%ymm8
+vmovdqa (_ZETAS_EXP+224*\off+224)*2(%rsi),%ymm2
+
+mul 10,3,9,11,14,15,8,2
+
+reduce
+update 8,4,6,5,7,10,3,9,11
+
+vmovdqa %ymm8,(128*\off+ 0)*2(%rdi)
+vmovdqa %ymm4,(128*\off+ 16)*2(%rdi)
+vmovdqa %ymm10,(128*\off+ 32)*2(%rdi)
+vmovdqa %ymm3,(128*\off+ 48)*2(%rdi)
+vmovdqa %ymm6,(128*\off+ 64)*2(%rdi)
+vmovdqa %ymm5,(128*\off+ 80)*2(%rdi)
+vmovdqa %ymm9,(128*\off+ 96)*2(%rdi)
+vmovdqa %ymm11,(128*\off+112)*2(%rdi)
+.endm
+
+.text
+.global cdecl(ntt_avx2_asm)
+cdecl(ntt_avx2_asm):
+vmovdqa _16XQ*2(%rsi),%ymm0
+
+level0 0
+level0 1
+
+levels1t6 0
+levels1t6 1
+
+ret
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.h
new file mode 100644
index 0000000000..3616132358
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_ntt_avx2.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <stdint.h>
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+#define ntt_avx2_asm S2N_KYBER_512_R3_NAMESPACE(ntt_avx2_asm)
+void ntt_avx2_asm(__m256i *r, const __m256i *qdata);
+
+#define invntt_avx2_asm S2N_KYBER_512_R3_NAMESPACE(invntt_avx2_asm)
+void invntt_avx2_asm(__m256i *r, const __m256i *qdata);
+
+#define nttunpack_avx2_asm S2N_KYBER_512_R3_NAMESPACE(nttunpack_avx2_asm)
+void nttunpack_avx2_asm(__m256i *r, const __m256i *qdata);
+
+#define basemul_avx2_asm S2N_KYBER_512_R3_NAMESPACE(basemul_avx2_asm)
+void basemul_avx2_asm(__m256i *r,
+ const __m256i *a,
+ const __m256i *b,
+ const __m256i *qdata);
+
+#define ntttobytes_avx2_asm S2N_KYBER_512_R3_NAMESPACE(ntttobytes_avx2_asm)
+void ntttobytes_avx2_asm(uint8_t *r, const __m256i *a, const __m256i *qdata);
+
+#define nttfrombytes_avx2_asm S2N_KYBER_512_R3_NAMESPACE(nttfrombytes_avx2_asm)
+void nttfrombytes_avx2_asm(__m256i *r, const uint8_t *a, const __m256i *qdata);
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_params.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_params.h
new file mode 100644
index 0000000000..d2d32d08f1
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_params.h
@@ -0,0 +1,31 @@
+#pragma once
+
+/* All kyber512r3 functions and global variables in the pq-crypto/kyber_r3 directory
+ * should be defined using the namespace macro to avoid symbol collisions. For example,
+ * in foo.h, declare a function as follows:
+ *
+ * #define foo_function S2N_KYBER_512_R3_NAMESPACE(foo_function)
+ * int foo_function(int foo_argument); */
+#define S2N_KYBER_512_R3_NAMESPACE(s) s2n_kyber_512_r3_##s
+
+#define S2N_KYBER_512_R3_K 2
+
+#define S2N_KYBER_512_R3_N 256
+#define S2N_KYBER_512_R3_Q 3329
+
+#define S2N_KYBER_512_R3_SYMBYTES 32 /* size in bytes of hashes, and seeds */
+#define S2N_KYBER_512_R3_SSBYTES 32 /* size in bytes of shared key */
+
+#define S2N_KYBER_512_R3_POLYBYTES 384
+#define S2N_KYBER_512_R3_POLYVECBYTES (S2N_KYBER_512_R3_K * S2N_KYBER_512_R3_POLYBYTES)
+
+#define S2N_KYBER_512_R3_ETA1 3
+#define S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES 128
+#define S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES 640
+
+#define S2N_KYBER_512_R3_ETA2 2
+
+#define S2N_KYBER_512_R3_INDCPA_MSGBYTES S2N_KYBER_512_R3_SYMBYTES
+#define S2N_KYBER_512_R3_INDCPA_PUBLICKEYBYTES (S2N_KYBER_512_R3_POLYVECBYTES + S2N_KYBER_512_R3_SYMBYTES)
+#define S2N_KYBER_512_R3_INDCPA_SECRETKEYBYTES (S2N_KYBER_512_R3_POLYVECBYTES)
+#define S2N_KYBER_512_R3_INDCPA_BYTES (S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES + S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES)
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.c
new file mode 100644
index 0000000000..76ae60a583
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.c
@@ -0,0 +1,300 @@
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_poly.h"
+#include "kyber512r3_ntt.h"
+#include "kyber512r3_reduce.h"
+#include "kyber512r3_cbd.h"
+#include "kyber512r3_symmetric.h"
+
+/*************************************************
+* Name: poly_compress
+*
+* Description: Compression and subsequent serialization of a polynomial
+*
+* Arguments: - uint8_t *r: pointer to output byte array
+* (of length S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES)
+* - poly *a: pointer to input polynomial
+**************************************************/
+void poly_compress(uint8_t r[S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES], poly *a) {
+ unsigned int i, j;
+ uint8_t t[8];
+
+ poly_csubq(a);
+
+ for (i = 0; i < S2N_KYBER_512_R3_N / 8; i++) {
+ for (j = 0; j < 8; j++) {
+ t[j] = ((((uint16_t)a->coeffs[8 * i + j] << 4) + S2N_KYBER_512_R3_Q / 2) / S2N_KYBER_512_R3_Q) & 15;
+ }
+
+ r[0] = t[0] | (t[1] << 4);
+ r[1] = t[2] | (t[3] << 4);
+ r[2] = t[4] | (t[5] << 4);
+ r[3] = t[6] | (t[7] << 4);
+ r += 4;
+ }
+}
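+
+/* Example of the 4-bit compression above (illustrative arithmetic only): each
+ * coefficient x is mapped to round(16*x/q) mod 16. For x = 1665 (about q/2),
+ * ((1665 << 4) + 1664) / 3329 = 28304 / 3329 = 8, so the stored nibble is 8;
+ * poly_decompress below maps it back to (8*3329 + 8) >> 4 = 1665. */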
+
+/*************************************************
+* Name: poly_decompress
+*
+* Description: De-serialization and subsequent decompression of a polynomial;
+* approximate inverse of poly_compress
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *a: pointer to input byte array
+* (of length S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES bytes)
+**************************************************/
+void poly_decompress(poly *r, const uint8_t a[S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES]) {
+ unsigned int i;
+
+ for (i = 0; i < S2N_KYBER_512_R3_N / 2; i++) {
+ r->coeffs[2 * i + 0] = (((uint16_t)(a[0] & 15) * S2N_KYBER_512_R3_Q) + 8) >> 4;
+ r->coeffs[2 * i + 1] = (((uint16_t)(a[0] >> 4) * S2N_KYBER_512_R3_Q) + 8) >> 4;
+ a += 1;
+ }
+}
+
+/*************************************************
+* Name: poly_tobytes
+*
+* Description: Serialization of a polynomial
+*
+* Arguments: - uint8_t *r: pointer to output byte array
+* (needs space for S2N_KYBER_512_R3_POLYBYTES bytes)
+* - poly *a: pointer to input polynomial
+**************************************************/
+void poly_tobytes(uint8_t r[S2N_KYBER_512_R3_POLYBYTES], poly *a) {
+ unsigned int i;
+
+ poly_csubq(a);
+
+ for (i = 0; i < S2N_KYBER_512_R3_N / 2; i++) {
+ uint16_t t0 = a->coeffs[2 * i];
+ uint16_t t1 = a->coeffs[2 * i + 1];
+ r[3 * i + 0] = (t0 >> 0);
+ r[3 * i + 1] = (t0 >> 8) | (t1 << 4);
+ r[3 * i + 2] = (t1 >> 4);
+ }
+}
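+
+/* Packing example for the 12-bit serialization above (illustrative only): for
+ * t0 = 0xABC and t1 = 0x123 the three output bytes are
+ *   r[0] = 0xBC, r[1] = 0x0A | 0x30 = 0x3A, r[2] = 0x12,
+ * and poly_frombytes below recovers 0xABC and 0x123 exactly. */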
+
+/*************************************************
+* Name: poly_frombytes
+*
+* Description: De-serialization of a polynomial;
+* inverse of poly_tobytes
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *a: pointer to input byte array
+* (of S2N_KYBER_512_R3_POLYBYTES bytes)
+**************************************************/
+void poly_frombytes(poly *r, const uint8_t a[S2N_KYBER_512_R3_POLYBYTES]) {
+ unsigned int i;
+ for (i = 0; i < S2N_KYBER_512_R3_N / 2; i++) {
+ r->coeffs[2 * i] = ((a[3 * i + 0] >> 0) | ((uint16_t)a[3 * i + 1] << 8)) & 0xFFF;
+ r->coeffs[2 * i + 1] = ((a[3 * i + 1] >> 4) | ((uint16_t)a[3 * i + 2] << 4)) & 0xFFF;
+ }
+}
+
+/*************************************************
+* Name: poly_frommsg
+*
+* Description: Convert 32-byte message to polynomial
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *msg: pointer to input message
+**************************************************/
+void poly_frommsg(poly *r, const uint8_t msg[S2N_KYBER_512_R3_INDCPA_MSGBYTES]) {
+ unsigned int i, j;
+ int16_t mask;
+
+ for (i = 0; i < S2N_KYBER_512_R3_N / 8; i++) {
+ for (j = 0; j < 8; j++) {
+ mask = -(int16_t)((msg[i] >> j) & 1);
+ r->coeffs[8 * i + j] = mask & ((S2N_KYBER_512_R3_Q + 1) / 2);
+ }
+ }
+}
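+
+/* Note on the expansion above: mask = -((msg[i] >> j) & 1) is either 0x0000 or
+ * 0xFFFF, so each message bit selects between 0 and (q+1)/2 = 1665 without any
+ * secret-dependent branch. */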
+
+/*************************************************
+* Name: poly_tomsg
+*
+* Description: Convert polynomial to 32-byte message
+*
+* Arguments: - uint8_t *msg: pointer to output message
+* - poly *a: pointer to input polynomial
+**************************************************/
+void poly_tomsg(uint8_t msg[S2N_KYBER_512_R3_INDCPA_MSGBYTES], poly *a) {
+ unsigned int i, j;
+ uint16_t t;
+
+ poly_csubq(a);
+
+ for (i = 0; i < S2N_KYBER_512_R3_N / 8; i++) {
+ msg[i] = 0;
+ for (j = 0; j < 8; j++) {
+ t = ((((uint16_t)a->coeffs[8 * i + j] << 1) + S2N_KYBER_512_R3_Q / 2) / S2N_KYBER_512_R3_Q) & 1;
+ msg[i] |= t << j;
+ }
+ }
+}
+
+/*************************************************
+* Name: poly_getnoise_eta1
+*
+* Description: Sample a polynomial deterministically from a seed and a nonce,
+* with output polynomial close to centered binomial distribution
+* with parameter S2N_KYBER_512_R3_ETA1
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *seed: pointer to input seed
+* (of length S2N_KYBER_512_R3_SYMBYTES bytes)
+* - uint8_t nonce: one-byte input nonce
+**************************************************/
+void poly_getnoise_eta1(poly *r, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], uint8_t nonce) {
+ uint8_t buf[S2N_KYBER_512_R3_ETA1 * S2N_KYBER_512_R3_N / 4];
+ shake256_prf(buf, sizeof(buf), seed, nonce);
+ cbd_eta1(r, buf);
+}
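+
+/* Buffer sizing above: ETA1*N/4 = 3*256/4 = 192 bytes of SHAKE-256 PRF output,
+ * i.e. 2*ETA1 = 6 bits per coefficient; cbd_eta1 then takes each coefficient
+ * as the difference of two 3-bit Hamming weights, giving values in [-3, 3]. */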
+
+/*************************************************
+* Name: poly_getnoise_eta2
+*
+* Description: Sample a polynomial deterministically from a seed and a nonce,
+* with output polynomial close to centered binomial distribution
+* with parameter S2N_KYBER_512_R3_ETA2
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *seed: pointer to input seed
+* (of length S2N_KYBER_512_R3_SYMBYTES bytes)
+* - uint8_t nonce: one-byte input nonce
+**************************************************/
+void poly_getnoise_eta2(poly *r, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], uint8_t nonce) {
+ uint8_t buf[S2N_KYBER_512_R3_ETA2 * S2N_KYBER_512_R3_N / 4];
+ shake256_prf(buf, sizeof(buf), seed, nonce);
+ cbd_eta2(r, buf);
+}
+
+
+/*************************************************
+* Name: poly_ntt
+*
+* Description: Computes negacyclic number-theoretic transform (NTT) of
+* a polynomial in place;
+* inputs assumed to be in normal order, output in bitreversed order
+*
+* Arguments: - poly *r: pointer to in/output polynomial
+**************************************************/
+void poly_ntt(poly *r) {
+ ntt(r->coeffs);
+ poly_reduce(r);
+}
+
+/*************************************************
+* Name: poly_invntt_tomont
+*
+* Description: Computes inverse of negacyclic number-theoretic transform (NTT)
+* of a polynomial in place;
+* inputs assumed to be in bitreversed order, output in normal order
+*
+* Arguments: - poly *r: pointer to in/output polynomial
+**************************************************/
+void poly_invntt_tomont(poly *r) {
+ invntt(r->coeffs);
+}
+
+/*************************************************
+* Name: poly_basemul_montgomery
+*
+* Description: Multiplication of two polynomials in NTT domain
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const poly *a: pointer to first input polynomial
+* - const poly *b: pointer to second input polynomial
+**************************************************/
+void poly_basemul_montgomery(poly *r, const poly *a, const poly *b) {
+ unsigned int i;
+ for (i = 0; i < S2N_KYBER_512_R3_N / 4; i++) {
+ basemul(&r->coeffs[4 * i], &a->coeffs[4 * i], &b->coeffs[4 * i], zetas[64 + i]);
+ basemul(&r->coeffs[4 * i + 2], &a->coeffs[4 * i + 2], &b->coeffs[4 * i + 2],
+ -zetas[64 + i]);
+ }
+}
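+
+/* Why the +/- zeta pair above: modulo q, X^256 + 1 splits into 128 quadratic
+ * factors X^2 -/+ zeta^(2*br(i)+1) (br = 7-bit bit reversal), and the NTT maps
+ * a polynomial to its residues modulo these factors. Consecutive coefficient
+ * pairs therefore live in rings with reduction polynomials X^2 - zeta and
+ * X^2 + zeta, which is why basemul is called with zetas[64+i] and then
+ * -zetas[64+i]. */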
+
+/*************************************************
+* Name: poly_tomont
+*
+* Description: Inplace conversion of all coefficients of a polynomial
+* from normal domain to Montgomery domain
+*
+* Arguments: - poly *r: pointer to input/output polynomial
+**************************************************/
+void poly_tomont(poly *r) {
+ unsigned int i;
+ const int16_t f = (1ULL << 32) % S2N_KYBER_512_R3_Q;
+ for (i = 0; i < S2N_KYBER_512_R3_N; i++) {
+ r->coeffs[i] = montgomery_reduce((int32_t)r->coeffs[i] * f);
+ }
+}
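+
+/* The constant above is f = 2^32 mod q = 1353; montgomery_reduce(x * f) returns
+ * x * 2^32 * 2^-16 = x * 2^16 mod q, i.e. x converted into Montgomery form. */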
+
+/*************************************************
+* Name: poly_reduce
+*
+* Description: Applies Barrett reduction to all coefficients of a polynomial;
+* for details of the Barrett reduction see comments in reduce.c
+*
+* Arguments: - poly *r: pointer to input/output polynomial
+**************************************************/
+void poly_reduce(poly *r) {
+ unsigned int i;
+ for (i = 0; i < S2N_KYBER_512_R3_N; i++) {
+ r->coeffs[i] = barrett_reduce(r->coeffs[i]);
+ }
+}
+
+/*************************************************
+* Name: poly_csubq
+*
+* Description: Applies conditional subtraction of q to each coefficient
+* of a polynomial. For details of conditional subtraction
+* of q see comments in reduce.c
+*
+* Arguments: - poly *r: pointer to input/output polynomial
+**************************************************/
+void poly_csubq(poly *r) {
+ unsigned int i;
+ for (i = 0; i < S2N_KYBER_512_R3_N; i++) {
+ r->coeffs[i] = csubq(r->coeffs[i]);
+ }
+}
+
+/*************************************************
+* Name: poly_add
+*
+* Description: Add two polynomials
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const poly *a: pointer to first input polynomial
+* - const poly *b: pointer to second input polynomial
+**************************************************/
+void poly_add(poly *r, const poly *a, const poly *b) {
+ unsigned int i;
+ for (i = 0; i < S2N_KYBER_512_R3_N; i++) {
+ r->coeffs[i] = a->coeffs[i] + b->coeffs[i];
+ }
+}
+
+/*************************************************
+* Name: poly_sub
+*
+* Description: Subtract two polynomials
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const poly *a: pointer to first input polynomial
+* - const poly *b: pointer to second input polynomial
+**************************************************/
+void poly_sub(poly *r, const poly *a, const poly *b) {
+ unsigned int i;
+ for (i = 0; i < S2N_KYBER_512_R3_N; i++) {
+ r->coeffs[i] = a->coeffs[i] - b->coeffs[i];
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.h
new file mode 100644
index 0000000000..da43766e51
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+
+/*
+ * Elements of R_q = Z_q[X]/(X^n + 1). Represents polynomial
+ * coeffs[0] + X*coeffs[1] + X^2*coeffs[2] + ... + X^{n-1}*coeffs[n-1]
+ */
+#define poly S2N_KYBER_512_R3_NAMESPACE(poly)
+typedef struct {
+ int16_t coeffs[S2N_KYBER_512_R3_N];
+} poly;
+
+#define poly_compress S2N_KYBER_512_R3_NAMESPACE(poly_compress)
+void poly_compress(uint8_t r[S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES], poly *a);
+
+#define poly_decompress S2N_KYBER_512_R3_NAMESPACE(poly_decompress)
+void poly_decompress(poly *r, const uint8_t a[S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES]);
+
+#define poly_tobytes S2N_KYBER_512_R3_NAMESPACE(poly_tobytes)
+void poly_tobytes(uint8_t r[S2N_KYBER_512_R3_POLYBYTES], poly *a);
+
+#define poly_frombytes S2N_KYBER_512_R3_NAMESPACE(poly_frombytes)
+void poly_frombytes(poly *r, const uint8_t a[S2N_KYBER_512_R3_POLYBYTES]);
+
+#define poly_frommsg S2N_KYBER_512_R3_NAMESPACE(poly_frommsg)
+void poly_frommsg(poly *r, const uint8_t msg[S2N_KYBER_512_R3_INDCPA_MSGBYTES]);
+
+#define poly_tomsg S2N_KYBER_512_R3_NAMESPACE(poly_tomsg)
+void poly_tomsg(uint8_t msg[S2N_KYBER_512_R3_INDCPA_MSGBYTES], poly *r);
+
+#define poly_getnoise_eta1 S2N_KYBER_512_R3_NAMESPACE(poly_getnoise_eta1)
+void poly_getnoise_eta1(poly *r, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], uint8_t nonce);
+
+#define poly_getnoise_eta2 S2N_KYBER_512_R3_NAMESPACE(poly_getnoise_eta2)
+void poly_getnoise_eta2(poly *r, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], uint8_t nonce);
+
+#define poly_ntt S2N_KYBER_512_R3_NAMESPACE(poly_ntt)
+void poly_ntt(poly *r);
+
+#define poly_invntt_tomont S2N_KYBER_512_R3_NAMESPACE(poly_invntt_tomont)
+void poly_invntt_tomont(poly *r);
+
+#define poly_basemul_montgomery S2N_KYBER_512_R3_NAMESPACE(poly_basemul_montgomery)
+void poly_basemul_montgomery(poly *r, const poly *a, const poly *b);
+
+#define poly_tomont S2N_KYBER_512_R3_NAMESPACE(poly_tomont)
+void poly_tomont(poly *r);
+
+#define poly_reduce S2N_KYBER_512_R3_NAMESPACE(poly_reduce)
+void poly_reduce(poly *r);
+
+#define poly_csubq S2N_KYBER_512_R3_NAMESPACE(poly_csubq)
+void poly_csubq(poly *r);
+
+#define poly_add S2N_KYBER_512_R3_NAMESPACE(poly_add)
+void poly_add(poly *r, const poly *a, const poly *b);
+
+#define poly_sub S2N_KYBER_512_R3_NAMESPACE(poly_sub)
+void poly_sub(poly *r, const poly *a, const poly *b);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.c
new file mode 100644
index 0000000000..aa961ff403
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.c
@@ -0,0 +1,453 @@
+#include <stdint.h>
+#include <string.h>
+#include "kyber512r3_align_avx2.h"
+#include "kyber512r3_consts_avx2.h"
+#include "kyber512r3_poly_avx2.h"
+#include "kyber512r3_ntt_avx2.h"
+#include "kyber512r3_reduce_avx2.h"
+#include "kyber512r3_cbd_avx2.h"
+#include "kyber512r3_fips202.h"
+#include "kyber512r3_fips202x4_avx2.h"
+#include "kyber512r3_symmetric.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+/*************************************************
+* Name: poly_compress_avx2
+*
+* Description: Compression and subsequent serialization of a polynomial.
+* The coefficients of the input polynomial are assumed to
+* lie in the interval [0,q], i.e. the polynomial must be reduced
+* by poly_reduce_avx2().
+*
+* Arguments: - uint8_t *r: pointer to output byte array
+* (of length S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES)
+* - const poly *a: pointer to input polynomial
+**************************************************/
+void poly_compress_avx2(uint8_t r[128], const poly * restrict a)
+{
+ unsigned int i;
+ __m256i f0, f1, f2, f3;
+ const __m256i v = _mm256_load_si256(&qdata.vec[_16XV/16]);
+ const __m256i shift1 = _mm256_set1_epi16(1 << 9);
+ const __m256i mask = _mm256_set1_epi16(15);
+ const __m256i shift2 = _mm256_set1_epi16((16 << 8) + 1);
+ const __m256i permdidx = _mm256_set_epi32(7,3,6,2,5,1,4,0);
+
+ for(i=0;i<S2N_KYBER_512_R3_N/64;i++) {
+ f0 = _mm256_load_si256(&a->vec[4*i+0]);
+ f1 = _mm256_load_si256(&a->vec[4*i+1]);
+ f2 = _mm256_load_si256(&a->vec[4*i+2]);
+ f3 = _mm256_load_si256(&a->vec[4*i+3]);
+ f0 = _mm256_mulhi_epi16(f0,v);
+ f1 = _mm256_mulhi_epi16(f1,v);
+ f2 = _mm256_mulhi_epi16(f2,v);
+ f3 = _mm256_mulhi_epi16(f3,v);
+ f0 = _mm256_mulhrs_epi16(f0,shift1);
+ f1 = _mm256_mulhrs_epi16(f1,shift1);
+ f2 = _mm256_mulhrs_epi16(f2,shift1);
+ f3 = _mm256_mulhrs_epi16(f3,shift1);
+ f0 = _mm256_and_si256(f0,mask);
+ f1 = _mm256_and_si256(f1,mask);
+ f2 = _mm256_and_si256(f2,mask);
+ f3 = _mm256_and_si256(f3,mask);
+ f0 = _mm256_packus_epi16(f0,f1);
+ f2 = _mm256_packus_epi16(f2,f3);
+ f0 = _mm256_maddubs_epi16(f0,shift2);
+ f2 = _mm256_maddubs_epi16(f2,shift2);
+ f0 = _mm256_packus_epi16(f0,f2);
+ f0 = _mm256_permutevar8x32_epi32(f0,permdidx);
+ // correcting cast-align error
+ // old version: _mm256_storeu_si256((__m256i *)&r[32*i],f0);
+ _mm256_storeu_si256((void *)&r[32*i],f0);
+ }
+}
+
+void poly_decompress_avx2(poly * restrict r, const uint8_t a[128])
+{
+ unsigned int i;
+ __m128i t;
+ __m256i f;
+ const __m256i q = _mm256_load_si256(&qdata.vec[_16XQ/16]);
+ const __m256i shufbidx = _mm256_set_epi8(7,7,7,7,6,6,6,6,5,5,5,5,4,4,4,4,
+ 3,3,3,3,2,2,2,2,1,1,1,1,0,0,0,0);
+ const __m256i mask = _mm256_set1_epi32(0x00F0000F);
+ const __m256i shift = _mm256_set1_epi32((128 << 16) + 2048);
+
+ for(i=0;i<S2N_KYBER_512_R3_N/16;i++) {
+ // correcting cast-align and cast-qual errors
+ // old version: t = _mm_loadl_epi64((__m128i *)&a[8*i]);
+ t = _mm_loadl_epi64((const void *)&a[8*i]);
+ f = _mm256_broadcastsi128_si256(t);
+ f = _mm256_shuffle_epi8(f,shufbidx);
+ f = _mm256_and_si256(f,mask);
+ f = _mm256_mullo_epi16(f,shift);
+ f = _mm256_mulhrs_epi16(f,q);
+ _mm256_store_si256(&r->vec[i],f);
+ }
+}
+
+/*************************************************
+* Name: poly_tobytes_avx2
+*
+* Description: Serialization of a polynomial in NTT representation.
+* The coefficients of the input polynomial are assumed to
+* lie in the interval [0,q], i.e. the polynomial must be reduced
+* by poly_reduce_avx2(). The coefficients are ordered as output by
+* poly_ntt_avx2(); the serialized output coefficients are in bitreversed
+* order.
+*
+* Arguments: - uint8_t *r: pointer to output byte array
+* (needs space for S2N_KYBER_512_R3_POLYBYTES bytes)
+* - poly *a: pointer to input polynomial
+**************************************************/
+void poly_tobytes_avx2(uint8_t r[S2N_KYBER_512_R3_POLYBYTES], const poly *a)
+{
+ ntttobytes_avx2_asm(r, a->vec, qdata.vec);
+}
+
+/*************************************************
+* Name: poly_frombytes_avx2
+*
+* Description: De-serialization of a polynomial;
+* inverse of poly_tobytes_avx2
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *a: pointer to input byte array
+* (of S2N_KYBER_512_R3_POLYBYTES bytes)
+**************************************************/
+void poly_frombytes_avx2(poly *r, const uint8_t a[S2N_KYBER_512_R3_POLYBYTES])
+{
+ nttfrombytes_avx2_asm(r->vec, a, qdata.vec);
+}
+
+/*************************************************
+* Name: poly_frommsg_avx2
+*
+* Description: Convert 32-byte message to polynomial
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *msg: pointer to input message
+**************************************************/
+void poly_frommsg_avx2(poly * restrict r, const uint8_t msg[S2N_KYBER_512_R3_INDCPA_MSGBYTES])
+{
+ __m256i f, g0, g1, g2, g3, h0, h1, h2, h3;
+ const __m256i shift = _mm256_broadcastsi128_si256(_mm_set_epi32(0,1,2,3));
+ const __m256i idx = _mm256_broadcastsi128_si256(_mm_set_epi8(15,14,11,10,7,6,3,2,13,12,9,8,5,4,1,0));
+ const __m256i hqs = _mm256_set1_epi16((S2N_KYBER_512_R3_Q+1)/2);
+
+#define FROMMSG64(i) \
+ g3 = _mm256_shuffle_epi32(f,0x55*i); \
+ g3 = _mm256_sllv_epi32(g3,shift); \
+ g3 = _mm256_shuffle_epi8(g3,idx); \
+ g0 = _mm256_slli_epi16(g3,12); \
+ g1 = _mm256_slli_epi16(g3,8); \
+ g2 = _mm256_slli_epi16(g3,4); \
+ g0 = _mm256_srai_epi16(g0,15); \
+ g1 = _mm256_srai_epi16(g1,15); \
+ g2 = _mm256_srai_epi16(g2,15); \
+ g3 = _mm256_srai_epi16(g3,15); \
+ g0 = _mm256_and_si256(g0,hqs); /* 19 18 17 16 3 2 1 0 */ \
+ g1 = _mm256_and_si256(g1,hqs); /* 23 22 21 20 7 6 5 4 */ \
+ g2 = _mm256_and_si256(g2,hqs); /* 27 26 25 24 11 10 9 8 */ \
+ g3 = _mm256_and_si256(g3,hqs); /* 31 30 29 28 15 14 13 12 */ \
+ h0 = _mm256_unpacklo_epi64(g0,g1); \
+ h2 = _mm256_unpackhi_epi64(g0,g1); \
+ h1 = _mm256_unpacklo_epi64(g2,g3); \
+ h3 = _mm256_unpackhi_epi64(g2,g3); \
+ g0 = _mm256_permute2x128_si256(h0,h1,0x20); \
+ g2 = _mm256_permute2x128_si256(h0,h1,0x31); \
+ g1 = _mm256_permute2x128_si256(h2,h3,0x20); \
+ g3 = _mm256_permute2x128_si256(h2,h3,0x31); \
+ _mm256_store_si256(&r->vec[0+2*i+0],g0); \
+ _mm256_store_si256(&r->vec[0+2*i+1],g1); \
+ _mm256_store_si256(&r->vec[8+2*i+0],g2); \
+ _mm256_store_si256(&r->vec[8+2*i+1],g3)
+
+ // correcting cast-align and cast-qual errors
+ // old version: f = _mm256_loadu_si256((__m256i *)msg);
+ f = _mm256_loadu_si256((const void *)msg);
+ FROMMSG64(0);
+ FROMMSG64(1);
+ FROMMSG64(2);
+ FROMMSG64(3);
+}
+
+/*************************************************
+* Name: poly_tomsg_avx2
+*
+* Description: Convert polynomial to 32-byte message.
+* The coefficients of the input polynomial are assumed to
+* lie in the interval [0,q], i.e. the polynomial must be reduced
+* by poly_reduce_avx2().
+*
+* Arguments: - uint8_t *msg: pointer to output message
+* - poly *a: pointer to input polynomial
+**************************************************/
+void poly_tomsg_avx2(uint8_t msg[S2N_KYBER_512_R3_INDCPA_MSGBYTES], const poly * restrict a)
+{
+ unsigned int i;
+ uint32_t small;
+ __m256i f0, f1, g0, g1;
+ const __m256i hq = _mm256_set1_epi16((S2N_KYBER_512_R3_Q - 1)/2);
+ const __m256i hhq = _mm256_set1_epi16((S2N_KYBER_512_R3_Q - 1)/4);
+
+ for(i=0;i<S2N_KYBER_512_R3_N/32;i++) {
+ f0 = _mm256_load_si256(&a->vec[2*i+0]);
+ f1 = _mm256_load_si256(&a->vec[2*i+1]);
+ f0 = _mm256_sub_epi16(hq, f0);
+ f1 = _mm256_sub_epi16(hq, f1);
+ g0 = _mm256_srai_epi16(f0, 15);
+ g1 = _mm256_srai_epi16(f1, 15);
+ f0 = _mm256_xor_si256(f0, g0);
+ f1 = _mm256_xor_si256(f1, g1);
+ f0 = _mm256_sub_epi16(f0, hhq);
+ f1 = _mm256_sub_epi16(f1, hhq);
+ f0 = _mm256_packs_epi16(f0, f1);
+ f0 = _mm256_permute4x64_epi64(f0, 0xD8);
+ small = _mm256_movemask_epi8(f0);
+ memcpy(&msg[4*i], &small, 4);
+ }
+}
+
+/*************************************************
+* Name: poly_getnoise_eta1_avx2
+*
+* Description: Sample a polynomial deterministically from a seed and a nonce,
+* with output polynomial close to centered binomial distribution
+* with parameter S2N_KYBER_512_R3_ETA1
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *seed: pointer to input seed
+* (of length S2N_KYBER_512_R3_SYMBYTES bytes)
+* - uint8_t nonce: one-byte input nonce
+**************************************************/
+void poly_getnoise_eta1_avx2(poly *r, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], uint8_t nonce)
+{
+ ALIGNED_UINT8(S2N_KYBER_512_R3_ETA1*S2N_KYBER_512_R3_N/4+32) buf; // +32 bytes as required by poly_cbd_eta1_avx2
+ shake256_prf(buf.coeffs, S2N_KYBER_512_R3_ETA1*S2N_KYBER_512_R3_N/4, seed, nonce);
+ poly_cbd_eta1_avx2(r, buf.vec);
+}
+
+/*************************************************
+* Name: poly_getnoise_eta2_avx2
+*
+* Description: Sample a polynomial deterministically from a seed and a nonce,
+* with output polynomial close to centered binomial distribution
+* with parameter S2N_KYBER_512_R3_ETA2
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const uint8_t *seed: pointer to input seed
+* (of length S2N_KYBER_512_R3_SYMBYTES bytes)
+* - uint8_t nonce: one-byte input nonce
+**************************************************/
+void poly_getnoise_eta2_avx2(poly *r, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], uint8_t nonce)
+{
+ ALIGNED_UINT8(S2N_KYBER_512_R3_ETA2*S2N_KYBER_512_R3_N/4) buf;
+ shake256_prf(buf.coeffs, S2N_KYBER_512_R3_ETA2*S2N_KYBER_512_R3_N/4, seed, nonce);
+ poly_cbd_eta2_avx2(r, buf.vec);
+}
+
+#define NOISE_NBLOCKS ((S2N_KYBER_512_R3_ETA1*S2N_KYBER_512_R3_N/4+S2N_KYBER_512_R3_SHAKE256_RATE-1)/S2N_KYBER_512_R3_SHAKE256_RATE)
+void poly_getnoise_eta1_4x(poly *r0,
+ poly *r1,
+ poly *r2,
+ poly *r3,
+ const uint8_t seed[32],
+ uint8_t nonce0,
+ uint8_t nonce1,
+ uint8_t nonce2,
+ uint8_t nonce3)
+{
+ ALIGNED_UINT8(NOISE_NBLOCKS*S2N_KYBER_512_R3_SHAKE256_RATE) buf[4];
+ __m256i f;
+ keccakx4_state state;
+
+ // correcting cast-align and cast-qual errors
+ // old version: f = _mm256_loadu_si256((__m256i *)seed);
+ f = _mm256_loadu_si256((const void *)seed);
+ _mm256_store_si256(buf[0].vec, f);
+ _mm256_store_si256(buf[1].vec, f);
+ _mm256_store_si256(buf[2].vec, f);
+ _mm256_store_si256(buf[3].vec, f);
+
+ buf[0].coeffs[32] = nonce0;
+ buf[1].coeffs[32] = nonce1;
+ buf[2].coeffs[32] = nonce2;
+ buf[3].coeffs[32] = nonce3;
+
+ shake256x4_absorb_once(&state, buf[0].coeffs, buf[1].coeffs, buf[2].coeffs, buf[3].coeffs, 33);
+ shake256x4_squeezeblocks(buf[0].coeffs, buf[1].coeffs, buf[2].coeffs, buf[3].coeffs, NOISE_NBLOCKS, &state);
+
+ poly_cbd_eta1_avx2(r0, buf[0].vec);
+ poly_cbd_eta1_avx2(r1, buf[1].vec);
+ poly_cbd_eta1_avx2(r2, buf[2].vec);
+ poly_cbd_eta1_avx2(r3, buf[3].vec);
+}
+
+void poly_getnoise_eta1122_4x(poly *r0,
+ poly *r1,
+ poly *r2,
+ poly *r3,
+ const uint8_t seed[32],
+ uint8_t nonce0,
+ uint8_t nonce1,
+ uint8_t nonce2,
+ uint8_t nonce3)
+{
+ ALIGNED_UINT8(NOISE_NBLOCKS*S2N_KYBER_512_R3_SHAKE256_RATE) buf[4];
+ __m256i f;
+ keccakx4_state state;
+
+ // correcting cast-align and cast-qual errors
+ // old version: f = _mm256_loadu_si256((__m256i *)seed);
+ f = _mm256_loadu_si256((const void *)seed);
+ _mm256_store_si256(buf[0].vec, f);
+ _mm256_store_si256(buf[1].vec, f);
+ _mm256_store_si256(buf[2].vec, f);
+ _mm256_store_si256(buf[3].vec, f);
+
+ buf[0].coeffs[32] = nonce0;
+ buf[1].coeffs[32] = nonce1;
+ buf[2].coeffs[32] = nonce2;
+ buf[3].coeffs[32] = nonce3;
+
+ shake256x4_absorb_once(&state, buf[0].coeffs, buf[1].coeffs, buf[2].coeffs, buf[3].coeffs, 33);
+ shake256x4_squeezeblocks(buf[0].coeffs, buf[1].coeffs, buf[2].coeffs, buf[3].coeffs, NOISE_NBLOCKS, &state);
+
+ poly_cbd_eta1_avx2(r0, buf[0].vec);
+ poly_cbd_eta1_avx2(r1, buf[1].vec);
+ poly_cbd_eta2_avx2(r2, buf[2].vec);
+ poly_cbd_eta2_avx2(r3, buf[3].vec);
+}
+
+/*************************************************
+* Name: poly_ntt_avx2
+*
+* Description: Computes negacyclic number-theoretic transform (NTT) of
+* a polynomial in place.
+* Input coefficients assumed to be in normal order,
+* output coefficients are in special order that is natural
+* for the vectorization. Input coefficients are assumed to be
+* bounded by q in absolute value, output coefficients are bounded
+* by 16118 in absolute value.
+*
+* Arguments: - poly *r: pointer to in/output polynomial
+**************************************************/
+void poly_ntt_avx2(poly *r)
+{
+ ntt_avx2_asm(r->vec, qdata.vec);
+}
+
+/*************************************************
+* Name: poly_invntt_tomont_avx2
+*
+* Description: Computes inverse of negacyclic number-theoretic transform (NTT)
+* of a polynomial in place;
+* Input coefficients assumed to be in special order from vectorized
+* forward ntt, output in normal order. Input coefficients can be
+* arbitrary 16-bit integers, output coefficients are bounded by 14870
+* in absolute value.
+*
+* Arguments: - poly *r: pointer to in/output polynomial
+**************************************************/
+void poly_invntt_tomont_avx2(poly *r)
+{
+ invntt_avx2_asm(r->vec, qdata.vec);
+}
+
+void poly_nttunpack_avx2(poly *r)
+{
+ nttunpack_avx2_asm(r->vec, qdata.vec);
+}
+
+/*************************************************
+* Name: poly_basemul_montgomery_avx2
+*
+* Description: Multiplication of two polynomials in NTT domain.
+* One of the input polynomials needs to have coefficients
+* bounded by q, the other polynomial can have arbitrary
+* coefficients. Output coefficients are bounded by 6656.
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const poly *a: pointer to first input polynomial
+* - const poly *b: pointer to second input polynomial
+**************************************************/
+void poly_basemul_montgomery_avx2(poly *r, const poly *a, const poly *b)
+{
+ basemul_avx2_asm(r->vec, a->vec, b->vec, qdata.vec);
+}
+
+/*************************************************
+* Name: poly_tomont_avx2
+*
+* Description: Inplace conversion of all coefficients of a polynomial
+* from normal domain to Montgomery domain
+*
+* Arguments: - poly *r: pointer to input/output polynomial
+**************************************************/
+void poly_tomont_avx2(poly *r)
+{
+ tomont_avx2_asm(r->vec, qdata.vec);
+}
+
+/*************************************************
+* Name: poly_reduce_avx2
+*
+* Description: Applies Barrett reduction to all coefficients of a polynomial;
+* for details of the Barrett reduction see comments in reduce.c
+*
+* Arguments: - poly *r: pointer to input/output polynomial
+**************************************************/
+void poly_reduce_avx2(poly *r)
+{
+ reduce_avx2_asm(r->vec, qdata.vec);
+}
+
+/*************************************************
+* Name: poly_add_avx2
+*
+* Description: Add two polynomials. No modular reduction
+* is performed.
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const poly *a: pointer to first input polynomial
+* - const poly *b: pointer to second input polynomial
+**************************************************/
+void poly_add_avx2(poly *r, const poly *a, const poly *b)
+{
+ unsigned int i;
+ __m256i f0, f1;
+
+ for(i=0;i<S2N_KYBER_512_R3_N/16;i++) {
+ f0 = _mm256_load_si256(&a->vec[i]);
+ f1 = _mm256_load_si256(&b->vec[i]);
+ f0 = _mm256_add_epi16(f0, f1);
+ _mm256_store_si256(&r->vec[i], f0);
+ }
+}
+
+/*************************************************
+* Name: poly_sub_avx2
+*
+* Description: Subtract two polynomials. No modular reduction
+* is performed.
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const poly *a: pointer to first input polynomial
+* - const poly *b: pointer to second input polynomial
+**************************************************/
+void poly_sub_avx2(poly *r, const poly *a, const poly *b)
+{
+ unsigned int i;
+ __m256i f0, f1;
+
+ for(i=0;i<S2N_KYBER_512_R3_N/16;i++) {
+ f0 = _mm256_load_si256(&a->vec[i]);
+ f1 = _mm256_load_si256(&b->vec[i]);
+ f0 = _mm256_sub_epi16(f0, f1);
+ _mm256_store_si256(&r->vec[i], f0);
+ }
+}
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.h
new file mode 100644
index 0000000000..bd6e857f79
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_poly_avx2.h
@@ -0,0 +1,80 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_align_avx2.h"
+#include "kyber512r3_params.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#define poly S2N_KYBER_512_R3_NAMESPACE(poly)
+typedef ALIGNED_INT16(S2N_KYBER_512_R3_N) poly;
+
+#define poly_compress_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_compress_avx2)
+void poly_compress_avx2(uint8_t r[S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES], const poly *a);
+
+#define poly_decompress_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_decompress_avx2)
+void poly_decompress_avx2(poly *r, const uint8_t a[S2N_KYBER_512_R3_POLYCOMPRESSEDBYTES]);
+
+#define poly_tobytes_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_tobytes_avx2)
+void poly_tobytes_avx2(uint8_t r[S2N_KYBER_512_R3_POLYBYTES], const poly *a);
+
+#define poly_frombytes_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_frombytes_avx2)
+void poly_frombytes_avx2(poly *r, const uint8_t a[S2N_KYBER_512_R3_POLYBYTES]);
+
+#define poly_frommsg_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_frommsg_avx2)
+void poly_frommsg_avx2(poly *r, const uint8_t msg[S2N_KYBER_512_R3_INDCPA_MSGBYTES]);
+
+#define poly_tomsg_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_tomsg_avx2)
+void poly_tomsg_avx2(uint8_t msg[S2N_KYBER_512_R3_INDCPA_MSGBYTES], const poly *r);
+
+#define poly_getnoise_eta1_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_getnoise_eta1_avx2)
+void poly_getnoise_eta1_avx2(poly *r, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], uint8_t nonce);
+
+#define poly_getnoise_eta2_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_getnoise_eta2_avx2)
+void poly_getnoise_eta2_avx2(poly *r, const uint8_t seed[S2N_KYBER_512_R3_SYMBYTES], uint8_t nonce);
+
+#define poly_getnoise_eta1_4x S2N_KYBER_512_R3_NAMESPACE(poly_getnoise_eta1_4x)
+void poly_getnoise_eta1_4x(poly *r0,
+ poly *r1,
+ poly *r2,
+ poly *r3,
+ const uint8_t seed[32],
+ uint8_t nonce0,
+ uint8_t nonce1,
+ uint8_t nonce2,
+ uint8_t nonce3);
+
+#define poly_getnoise_eta1122_4x S2N_KYBER_512_R3_NAMESPACE(poly_getnoise_eta1122_4x)
+void poly_getnoise_eta1122_4x(poly *r0,
+ poly *r1,
+ poly *r2,
+ poly *r3,
+ const uint8_t seed[32],
+ uint8_t nonce0,
+ uint8_t nonce1,
+ uint8_t nonce2,
+ uint8_t nonce3);
+
+#define poly_ntt_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_ntt_avx2)
+void poly_ntt_avx2(poly *r);
+
+#define poly_invntt_tomont_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_invntt_tomont_avx2)
+void poly_invntt_tomont_avx2(poly *r);
+
+#define poly_nttunpack_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_nttunpack_avx2)
+void poly_nttunpack_avx2(poly *r);
+
+#define poly_basemul_montgomery_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_basemul_montgomery_avx2)
+void poly_basemul_montgomery_avx2(poly *r, const poly *a, const poly *b);
+
+#define poly_tomont_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_tomont_avx2)
+void poly_tomont_avx2(poly *r);
+
+#define poly_reduce_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_reduce_avx2)
+void poly_reduce_avx2(poly *r);
+
+#define poly_add_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_add_avx2)
+void poly_add_avx2(poly *r, const poly *a, const poly *b);
+
+#define poly_sub_avx2 S2N_KYBER_512_R3_NAMESPACE(poly_sub_avx2)
+void poly_sub_avx2(poly *r, const poly *a, const poly *b);
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.c
new file mode 100644
index 0000000000..0a84cd092a
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.c
@@ -0,0 +1,186 @@
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_poly.h"
+#include "kyber512r3_polyvec.h"
+
+/*************************************************
+* Name: polyvec_compress
+*
+* Description: Compress and serialize vector of polynomials
+*
+* Arguments: - uint8_t *r: pointer to output byte array
+* (needs space for S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES)
+* - polyvec *a: pointer to input vector of polynomials
+**************************************************/
+void polyvec_compress(uint8_t r[S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES], polyvec *a) {
+ polyvec_csubq(a);
+
+ uint16_t t[4];
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ for (unsigned int j = 0; j < S2N_KYBER_512_R3_N / 4; j++) {
+ for (unsigned int k = 0; k < 4; k++)
+ t[k] = ((((uint32_t)a->vec[i].coeffs[4 * j + k] << 10) + S2N_KYBER_512_R3_Q / 2)
+ / S2N_KYBER_512_R3_Q) & 0x3ff;
+
+ r[0] = (t[0] >> 0);
+ r[1] = (t[0] >> 8) | (t[1] << 2);
+ r[2] = (t[1] >> 6) | (t[2] << 4);
+ r[3] = (t[2] >> 4) | (t[3] << 6);
+ r[4] = (t[3] >> 2);
+ r += 5;
+ }
+ }
+}
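+
+/* 10-bit compression example for the loop above (illustrative only): each
+ * coefficient x is mapped to round(1024*x/q) mod 1024, e.g. for x = 1665,
+ * ((1665 << 10) + 1664) / 3329 = 1706624 / 3329 = 512; four such 10-bit
+ * values are then packed into 5 output bytes. */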
+
+/*************************************************
+* Name: polyvec_decompress
+*
+* Description: De-serialize and decompress vector of polynomials;
+* approximate inverse of polyvec_compress
+*
+* Arguments: - polyvec *r: pointer to output vector of polynomials
+* - const uint8_t *a: pointer to input byte array
+* (of length S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES)
+**************************************************/
+void polyvec_decompress(polyvec *r, const uint8_t a[S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES]) {
+ uint16_t t[4];
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ for (unsigned int j = 0; j < S2N_KYBER_512_R3_N / 4; j++) {
+ t[0] = (a[0] >> 0) | ((uint16_t)a[1] << 8);
+ t[1] = (a[1] >> 2) | ((uint16_t)a[2] << 6);
+ t[2] = (a[2] >> 4) | ((uint16_t)a[3] << 4);
+ t[3] = (a[3] >> 6) | ((uint16_t)a[4] << 2);
+ a += 5;
+
+ for (unsigned int k = 0; k < 4; k++) {
+ r->vec[i].coeffs[4 * j + k] = ((uint32_t)(t[k] & 0x3FF) * S2N_KYBER_512_R3_Q + 512) >> 10;
+ }
+ }
+ }
+}
+
+/*************************************************
+* Name: polyvec_tobytes
+*
+* Description: Serialize vector of polynomials
+*
+* Arguments: - uint8_t *r: pointer to output byte array
+* (needs space for S2N_KYBER_512_R3_POLYVECBYTES)
+* - polyvec *a: pointer to input vector of polynomials
+**************************************************/
+void polyvec_tobytes(uint8_t r[S2N_KYBER_512_R3_POLYVECBYTES], polyvec *a) {
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_tobytes(r + i * S2N_KYBER_512_R3_POLYBYTES, &a->vec[i]);
+ }
+}
+
+/*************************************************
+* Name: polyvec_frombytes
+*
+* Description: De-serialize vector of polynomials;
+* inverse of polyvec_tobytes
+*
+* Arguments: - polyvec *r: pointer to output vector of polynomials
+* - const uint8_t *a: pointer to input byte array
+* (of length S2N_KYBER_512_R3_POLYVECBYTES)
+**************************************************/
+void polyvec_frombytes(polyvec *r, const uint8_t a[S2N_KYBER_512_R3_POLYVECBYTES]) {
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_frombytes(&r->vec[i], a + i * S2N_KYBER_512_R3_POLYBYTES);
+ }
+}
+
+/*************************************************
+* Name: polyvec_ntt
+*
+* Description: Apply forward NTT to all elements of a vector of polynomials
+*
+* Arguments: - polyvec *r: pointer to in/output vector of polynomials
+**************************************************/
+void polyvec_ntt(polyvec *r) {
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_ntt(&r->vec[i]);
+ }
+}
+
+/*************************************************
+* Name: polyvec_invntt_tomont
+*
+* Description: Apply inverse NTT to all elements of a vector of polynomials
+* and multiply by Montgomery factor 2^16
+*
+* Arguments: - polyvec *r: pointer to in/output vector of polynomials
+**************************************************/
+void polyvec_invntt_tomont(polyvec *r) {
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_invntt_tomont(&r->vec[i]);
+ }
+}
+
+/*************************************************
+* Name: polyvec_pointwise_acc_montgomery
+*
+* Description: Pointwise multiply elements of a and b, accumulate into r,
+* and multiply by 2^-16.
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const polyvec *a: pointer to first input vector of polynomials
+* - const polyvec *b: pointer to second input vector of polynomials
+**************************************************/
+void polyvec_pointwise_acc_montgomery(poly *r, const polyvec *a, const polyvec *b) {
+ poly t;
+
+ poly_basemul_montgomery(r, &a->vec[0], &b->vec[0]);
+ for (unsigned int i = 1; i < S2N_KYBER_512_R3_K; i++) {
+ poly_basemul_montgomery(&t, &a->vec[i], &b->vec[i]);
+ poly_add(r, r, &t);
+ }
+
+ poly_reduce(r);
+}
+
+/*************************************************
+* Name: polyvec_reduce
+*
+* Description: Applies Barrett reduction to each coefficient
+* of each element of a vector of polynomials;
+* for details of the Barrett reduction see comments in reduce.c
+*
+* Arguments: - polyvec *r: pointer to input/output vector of polynomials
+**************************************************/
+void polyvec_reduce(polyvec *r) {
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_reduce(&r->vec[i]);
+ }
+}
+
+/*************************************************
+* Name: polyvec_csubq
+*
+* Description: Applies conditional subtraction of q to each coefficient
+* of each element of a vector of polynomials;
+* for details of conditional subtraction of q see comments in
+* reduce.c
+*
+* Arguments: - polyvec *r: pointer to input/output vector of polynomials
+**************************************************/
+void polyvec_csubq(polyvec *r) {
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_csubq(&r->vec[i]);
+ }
+}
+
+/*************************************************
+* Name: polyvec_add
+*
+* Description: Add vectors of polynomials
+*
+* Arguments: - polyvec *r: pointer to output vector of polynomials
+* - const polyvec *a: pointer to first input vector of polynomials
+* - const polyvec *b: pointer to second input vector of polynomials
+**************************************************/
+void polyvec_add(polyvec *r, const polyvec *a, const polyvec *b) {
+ for (unsigned int i = 0; i < S2N_KYBER_512_R3_K; i++) {
+ poly_add(&r->vec[i], &a->vec[i], &b->vec[i]);
+ }
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.h
new file mode 100644
index 0000000000..797f3c0d31
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec.h
@@ -0,0 +1,40 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_poly.h"
+
+#define polyvec S2N_KYBER_512_R3_NAMESPACE(polyvec)
+typedef struct {
+ poly vec[S2N_KYBER_512_R3_K];
+} polyvec;
+
+#define polyvec_compress S2N_KYBER_512_R3_NAMESPACE(polyvec_compress)
+void polyvec_compress(uint8_t r[S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES], polyvec *a);
+
+#define polyvec_decompress S2N_KYBER_512_R3_NAMESPACE(polyvec_decompress)
+void polyvec_decompress(polyvec *r, const uint8_t a[S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES]);
+
+#define polyvec_tobytes S2N_KYBER_512_R3_NAMESPACE(polyvec_tobytes)
+void polyvec_tobytes(uint8_t r[S2N_KYBER_512_R3_POLYVECBYTES], polyvec *a);
+
+#define polyvec_frombytes S2N_KYBER_512_R3_NAMESPACE(polyvec_frombytes)
+void polyvec_frombytes(polyvec *r, const uint8_t a[S2N_KYBER_512_R3_POLYVECBYTES]);
+
+#define polyvec_ntt S2N_KYBER_512_R3_NAMESPACE(polyvec_ntt)
+void polyvec_ntt(polyvec *r);
+
+#define polyvec_invntt_tomont S2N_KYBER_512_R3_NAMESPACE(polyvec_invntt_tomont)
+void polyvec_invntt_tomont(polyvec *r);
+
+#define polyvec_pointwise_acc_montgomery S2N_KYBER_512_R3_NAMESPACE(polyvec_pointwise_acc_montgomery)
+void polyvec_pointwise_acc_montgomery(poly *r, const polyvec *a, const polyvec *b);
+
+#define polyvec_reduce S2N_KYBER_512_R3_NAMESPACE(polyvec_reduce)
+void polyvec_reduce(polyvec *r);
+
+#define polyvec_csubq S2N_KYBER_512_R3_NAMESPACE(polyvec_csubq)
+void polyvec_csubq(polyvec *r);
+
+#define polyvec_add S2N_KYBER_512_R3_NAMESPACE(polyvec_add)
+void polyvec_add(polyvec *r, const polyvec *a, const polyvec *b);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.c
new file mode 100644
index 0000000000..8434b96d76
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.c
@@ -0,0 +1,227 @@
+#include <stdint.h>
+#include <string.h>
+#include "kyber512r3_polyvec_avx2.h"
+#include "kyber512r3_poly_avx2.h"
+#include "kyber512r3_consts_avx2.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+static void poly_compress10(uint8_t r[320], const poly * restrict a)
+{
+ unsigned int i;
+ __m256i f0, f1, f2;
+ __m128i t0, t1;
+ const __m256i v = _mm256_load_si256(&qdata.vec[_16XV/16]);
+ const __m256i v8 = _mm256_slli_epi16(v,3);
+ const __m256i off = _mm256_set1_epi16(15);
+ const __m256i shift1 = _mm256_set1_epi16(1 << 12);
+ const __m256i mask = _mm256_set1_epi16(1023);
+ const __m256i shift2 = _mm256_set1_epi64x((1024LL << 48) + (1LL << 32) + (1024 << 16) + 1);
+ const __m256i sllvdidx = _mm256_set1_epi64x(12);
+ const __m256i shufbidx = _mm256_set_epi8( 8, 4, 3, 2, 1, 0,-1,-1,-1,-1,-1,-1,12,11,10, 9,
+ -1,-1,-1,-1,-1,-1,12,11,10, 9, 8, 4, 3, 2, 1, 0);
+
+ for(i=0;i<S2N_KYBER_512_R3_N/16;i++) {
+ f0 = _mm256_load_si256(&a->vec[i]);
+ f1 = _mm256_mullo_epi16(f0,v8);
+ f2 = _mm256_add_epi16(f0,off);
+ f0 = _mm256_slli_epi16(f0,3);
+ f0 = _mm256_mulhi_epi16(f0,v);
+ f2 = _mm256_sub_epi16(f1,f2);
+ f1 = _mm256_andnot_si256(f1,f2);
+ f1 = _mm256_srli_epi16(f1,15);
+ f0 = _mm256_sub_epi16(f0,f1);
+ f0 = _mm256_mulhrs_epi16(f0,shift1);
+ f0 = _mm256_and_si256(f0,mask);
+ f0 = _mm256_madd_epi16(f0,shift2);
+ f0 = _mm256_sllv_epi32(f0,sllvdidx);
+ f0 = _mm256_srli_epi64(f0,12);
+ f0 = _mm256_shuffle_epi8(f0,shufbidx);
+ t0 = _mm256_castsi256_si128(f0);
+ t1 = _mm256_extracti128_si256(f0,1);
+ t0 = _mm_blend_epi16(t0,t1,0xE0);
+ // correcting cast-align error
+ // old version: _mm_storeu_si128((__m128i *)&r[20*i+ 0],t0);
+ _mm_storeu_si128((void *)&r[20*i+ 0],t0);
+ memcpy(&r[20*i+16],&t1,4);
+ }
+}
+
+static void poly_decompress10(poly * restrict r, const uint8_t a[320+12])
+{
+ unsigned int i;
+ __m256i f;
+ const __m256i q = _mm256_set1_epi32((S2N_KYBER_512_R3_Q << 16) + 4*S2N_KYBER_512_R3_Q);
+ const __m256i shufbidx = _mm256_set_epi8(11,10,10, 9, 9, 8, 8, 7,
+ 6, 5, 5, 4, 4, 3, 3, 2,
+ 9, 8, 8, 7, 7, 6, 6, 5,
+ 4, 3, 3, 2, 2, 1, 1, 0);
+ const __m256i sllvdidx = _mm256_set1_epi64x(4);
+ const __m256i mask = _mm256_set1_epi32((32736 << 16) + 8184);
+
+ for(i=0;i<S2N_KYBER_512_R3_N/16;i++) {
+ // correcting cast-align and cast-qual errors
+ // old version: f = _mm256_loadu_si256((__m256i *)&a[20*i]);
+ f = _mm256_loadu_si256((const void *)&a[20*i]);
+ f = _mm256_permute4x64_epi64(f,0x94);
+ f = _mm256_shuffle_epi8(f,shufbidx);
+ f = _mm256_sllv_epi32(f,sllvdidx);
+ f = _mm256_srli_epi16(f,1);
+ f = _mm256_and_si256(f,mask);
+ f = _mm256_mulhrs_epi16(f,q);
+ _mm256_store_si256(&r->vec[i],f);
+ }
+}
+
+/*************************************************
+* Name: polyvec_compress_avx2
+*
+* Description: Compress and serialize vector of polynomials
+*
+* Arguments: - uint8_t *r: pointer to output byte array
+* (needs space for S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES)
+* - polyvec *a: pointer to input vector of polynomials
+**************************************************/
+void polyvec_compress_avx2(uint8_t r[S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES+2], const polyvec *a)
+{
+ unsigned int i;
+
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ poly_compress10(&r[320*i],&a->vec[i]);
+}
+
+/*************************************************
+* Name: polyvec_decompress_avx2
+*
+* Description: De-serialize and decompress vector of polynomials;
+* approximate inverse of polyvec_compress_avx2
+*
+* Arguments: - polyvec *r: pointer to output vector of polynomials
+* - const uint8_t *a: pointer to input byte array
+* (of length S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES)
+**************************************************/
+void polyvec_decompress_avx2(polyvec *r, const uint8_t a[S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES+12])
+{
+ unsigned int i;
+
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ poly_decompress10(&r->vec[i],&a[320*i]);
+}
+
+/*************************************************
+* Name: polyvec_tobytes_avx2
+*
+* Description: Serialize vector of polynomials
+*
+* Arguments: - uint8_t *r: pointer to output byte array
+* (needs space for S2N_KYBER_512_R3_POLYVECBYTES)
+* - polyvec *a: pointer to input vector of polynomials
+**************************************************/
+void polyvec_tobytes_avx2(uint8_t r[S2N_KYBER_512_R3_POLYVECBYTES], const polyvec *a)
+{
+ unsigned int i;
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ poly_tobytes_avx2(r+i*S2N_KYBER_512_R3_POLYBYTES, &a->vec[i]);
+}
+
+/*************************************************
+* Name: polyvec_frombytes_avx2
+*
+* Description: De-serialize vector of polynomials;
+* inverse of polyvec_tobytes_avx2
+*
+* Arguments: - polyvec *r: pointer to output vector of polynomials
+* - const uint8_t *a: pointer to input byte array
+* (of length S2N_KYBER_512_R3_POLYVECBYTES)
+**************************************************/
+void polyvec_frombytes_avx2(polyvec *r, const uint8_t a[S2N_KYBER_512_R3_POLYVECBYTES])
+{
+ unsigned int i;
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ poly_frombytes_avx2(&r->vec[i], a+i*S2N_KYBER_512_R3_POLYBYTES);
+}
+
+/*************************************************
+* Name: polyvec_ntt_avx2
+*
+* Description: Apply forward NTT to all elements of a vector of polynomials
+*
+* Arguments: - polyvec *r: pointer to in/output vector of polynomials
+**************************************************/
+void polyvec_ntt_avx2(polyvec *r)
+{
+ unsigned int i;
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ poly_ntt_avx2(&r->vec[i]);
+}
+
+/*************************************************
+* Name: polyvec_invntt_tomont_avx2
+*
+* Description: Apply inverse NTT to all elements of a vector of polynomials
+* and multiply by Montgomery factor 2^16
+*
+* Arguments: - polyvec *r: pointer to in/output vector of polynomials
+**************************************************/
+void polyvec_invntt_tomont_avx2(polyvec *r)
+{
+ unsigned int i;
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ poly_invntt_tomont_avx2(&r->vec[i]);
+}
+
+/*************************************************
+* Name: polyvec_basemul_acc_montgomery_avx2
+*
+* Description: Multiply elements in a and b in NTT domain, accumulate into r,
+* and multiply by 2^-16.
+*
+* Arguments: - poly *r: pointer to output polynomial
+* - const polyvec *a: pointer to first input vector of polynomials
+* - const polyvec *b: pointer to second input vector of polynomials
+**************************************************/
+void polyvec_basemul_acc_montgomery_avx2(poly *r, const polyvec *a, const polyvec *b)
+{
+ unsigned int i;
+ poly tmp;
+
+ poly_basemul_montgomery_avx2(r,&a->vec[0],&b->vec[0]);
+ for(i=1;i<S2N_KYBER_512_R3_K;i++) {
+ poly_basemul_montgomery_avx2(&tmp,&a->vec[i],&b->vec[i]);
+ poly_add_avx2(r,r,&tmp);
+ }
+}
+
+/*************************************************
+* Name: polyvec_reduce_avx2
+*
+* Description: Applies Barrett reduction to each coefficient
+* of each element of a vector of polynomials;
+* for details of the Barrett reduction see comments in reduce.c
+*
+* Arguments: - polyvec *r: pointer to input/output vector of polynomials
+**************************************************/
+void polyvec_reduce_avx2(polyvec *r)
+{
+ unsigned int i;
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ poly_reduce_avx2(&r->vec[i]);
+}
+
+/*************************************************
+* Name: polyvec_add_avx2
+*
+* Description: Add vectors of polynomials
+*
+* Arguments: - polyvec *r: pointer to output vector of polynomials
+* - const polyvec *a: pointer to first input vector of polynomials
+* - const polyvec *b: pointer to second input vector of polynomials
+**************************************************/
+void polyvec_add_avx2(polyvec *r, const polyvec *a, const polyvec *b)
+{
+ unsigned int i;
+ for(i=0;i<S2N_KYBER_512_R3_K;i++)
+ poly_add_avx2(&r->vec[i], &a->vec[i], &b->vec[i]);
+}
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.h
new file mode 100644
index 0000000000..536e1b23d0
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_polyvec_avx2.h
@@ -0,0 +1,39 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_poly_avx2.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#define polyvec S2N_KYBER_512_R3_NAMESPACE(polyvec)
+typedef struct{
+ poly vec[S2N_KYBER_512_R3_K];
+} polyvec;
+
+#define polyvec_compress_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_compress_avx2)
+void polyvec_compress_avx2(uint8_t r[S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES+2], const polyvec *a);
+
+#define polyvec_decompress_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_decompress_avx2)
+void polyvec_decompress_avx2(polyvec *r, const uint8_t a[S2N_KYBER_512_R3_POLYVECCOMPRESSEDBYTES+12]);
+
+#define polyvec_tobytes_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_tobytes_avx2)
+void polyvec_tobytes_avx2(uint8_t r[S2N_KYBER_512_R3_POLYVECBYTES], const polyvec *a);
+
+#define polyvec_frombytes_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_frombytes_avx2)
+void polyvec_frombytes_avx2(polyvec *r, const uint8_t a[S2N_KYBER_512_R3_POLYVECBYTES]);
+
+#define polyvec_ntt_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_ntt_avx2)
+void polyvec_ntt_avx2(polyvec *r);
+
+#define polyvec_invntt_tomont_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_invntt_tomont_avx2)
+void polyvec_invntt_tomont_avx2(polyvec *r);
+
+#define polyvec_basemul_acc_montgomery_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_basemul_acc_montgomery_avx2)
+void polyvec_basemul_acc_montgomery_avx2(poly *r, const polyvec *a, const polyvec *b);
+
+#define polyvec_reduce_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_reduce_avx2)
+void polyvec_reduce_avx2(polyvec *r);
+
+#define polyvec_add_avx2 S2N_KYBER_512_R3_NAMESPACE(polyvec_add_avx2)
+void polyvec_add_avx2(polyvec *r, const polyvec *a, const polyvec *b);
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.c
new file mode 100644
index 0000000000..6219ad7e88
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.c
@@ -0,0 +1,60 @@
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_reduce.h"
+
+/*************************************************
+* Name: montgomery_reduce
+*
+* Description: Montgomery reduction; given a 32-bit integer a, computes
+* 16-bit integer congruent to a * R^-1 mod q,
+* where R=2^16
+*
+* Arguments: - int32_t a: input integer to be reduced;
+* has to be in {-q*2^15,...,q*2^15-1}
+*
+* Returns: integer in {-q+1,...,q-1} congruent to a * R^-1 modulo q.
+**************************************************/
+int16_t montgomery_reduce(int32_t a) {
+ int32_t t;
+ int16_t u;
+
+ u = a * S2N_KYBER_512_R3_QINV;
+ t = (int32_t)u * S2N_KYBER_512_R3_Q;
+ t = a - t;
+ t >>= 16;
+ return t;
+}
+
+/*************************************************
+* Name: barrett_reduce
+*
+* Description: Barrett reduction; given a 16-bit integer a, computes
+* 16-bit integer congruent to a mod q in {0,...,q}
+*
+* Arguments: - int16_t a: input integer to be reduced
+*
+* Returns: integer in {0,...,q} congruent to a modulo q.
+**************************************************/
+int16_t barrett_reduce(int16_t a) {
+ int16_t t;
+ const int16_t v = ((1U << 26) + S2N_KYBER_512_R3_Q / 2) / S2N_KYBER_512_R3_Q;
+
+ t = (int32_t)v * a >> 26;
+ t *= S2N_KYBER_512_R3_Q;
+ return a - t;
+}
+
+/*************************************************
+* Name: csubq
+*
+* Description: Conditionally subtract q
+*
+* Arguments: - int16_t a: input integer
+*
+* Returns: a - q if a >= q, else a
+**************************************************/
+int16_t csubq(int16_t a) {
+ a -= S2N_KYBER_512_R3_Q;
+ a += (a >> 15) & S2N_KYBER_512_R3_Q;
+ return a;
+}
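
The reduction routines above are easiest to sanity-check in isolation. The following standalone sketch (not part of the patch, with illustrative names) mirrors the montgomery_reduce logic using Kyber's q = 3329 and the QINV = 62209 constant defined below (q * QINV ≡ 1 mod 2^16), and asserts the defining property r * 2^16 ≡ a (mod q):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define Q 3329
#define QINV 62209 /* q^-1 mod 2^16 */

/* Same idea as montgomery_reduce above; the unsigned multiply is used here
 * only because the demo needs just the low 16 bits of a * q^-1. */
static int16_t mont_reduce_demo(int32_t a) {
    int16_t u = (int16_t)((uint32_t)a * QINV); /* low 16 bits of a * q^-1 */
    int32_t t = (int32_t)u * Q;                /* multiple of q matching a in its low 16 bits */
    return (int16_t)((a - t) >> 16);           /* congruent to a * 2^-16 mod q, in (-q, q) */
}

int main(void) {
    int32_t a = 123456;
    int16_t r = mont_reduce_demo(a);
    /* Defining property: r * 2^16 ≡ a (mod q). */
    assert(((((int64_t)r) << 16) - a) % Q == 0);
    printf("mont_reduce_demo(%d) = %d\n", a, r);
    return 0;
}
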
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.h
new file mode 100644
index 0000000000..bab9fa54f9
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+
+#define S2N_KYBER_512_R3_QINV 62209 /* q^-1 mod 2^16 */
+
+#define montgomery_reduce S2N_KYBER_512_R3_NAMESPACE(montgomery_reduce)
+int16_t montgomery_reduce(int32_t a);
+
+#define barrett_reduce S2N_KYBER_512_R3_NAMESPACE(barrett_reduce)
+int16_t barrett_reduce(int16_t a);
+
+#define csubq S2N_KYBER_512_R3_NAMESPACE(csubq)
+int16_t csubq(int16_t x);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce_avx2.h
new file mode 100644
index 0000000000..24f0ede4e0
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_reduce_avx2.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "kyber512r3_params.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+#define reduce_avx2_asm S2N_KYBER_512_R3_NAMESPACE(reduce_avx2_asm)
+void reduce_avx2_asm(__m256i *r, const __m256i *qdata);
+
+#define tomont_avx2_asm S2N_KYBER_512_R3_NAMESPACE(tomont_avx2_asm)
+void tomont_avx2_asm(__m256i *r, const __m256i *qdata);
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.c
new file mode 100644
index 0000000000..1461e0b9b1
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.c
@@ -0,0 +1,420 @@
+#include <stdint.h>
+#include <string.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_consts_avx2.h"
+#include "kyber512r3_rejsample_avx2.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#include <immintrin.h>
+
+//#define BMI
+
+#ifndef BMI
+static const uint8_t idx[256][8] = {
+ {-1, -1, -1, -1, -1, -1, -1, -1},
+ { 0, -1, -1, -1, -1, -1, -1, -1},
+ { 2, -1, -1, -1, -1, -1, -1, -1},
+ { 0, 2, -1, -1, -1, -1, -1, -1},
+ { 4, -1, -1, -1, -1, -1, -1, -1},
+ { 0, 4, -1, -1, -1, -1, -1, -1},
+ { 2, 4, -1, -1, -1, -1, -1, -1},
+ { 0, 2, 4, -1, -1, -1, -1, -1},
+ { 6, -1, -1, -1, -1, -1, -1, -1},
+ { 0, 6, -1, -1, -1, -1, -1, -1},
+ { 2, 6, -1, -1, -1, -1, -1, -1},
+ { 0, 2, 6, -1, -1, -1, -1, -1},
+ { 4, 6, -1, -1, -1, -1, -1, -1},
+ { 0, 4, 6, -1, -1, -1, -1, -1},
+ { 2, 4, 6, -1, -1, -1, -1, -1},
+ { 0, 2, 4, 6, -1, -1, -1, -1},
+ { 8, -1, -1, -1, -1, -1, -1, -1},
+ { 0, 8, -1, -1, -1, -1, -1, -1},
+ { 2, 8, -1, -1, -1, -1, -1, -1},
+ { 0, 2, 8, -1, -1, -1, -1, -1},
+ { 4, 8, -1, -1, -1, -1, -1, -1},
+ { 0, 4, 8, -1, -1, -1, -1, -1},
+ { 2, 4, 8, -1, -1, -1, -1, -1},
+ { 0, 2, 4, 8, -1, -1, -1, -1},
+ { 6, 8, -1, -1, -1, -1, -1, -1},
+ { 0, 6, 8, -1, -1, -1, -1, -1},
+ { 2, 6, 8, -1, -1, -1, -1, -1},
+ { 0, 2, 6, 8, -1, -1, -1, -1},
+ { 4, 6, 8, -1, -1, -1, -1, -1},
+ { 0, 4, 6, 8, -1, -1, -1, -1},
+ { 2, 4, 6, 8, -1, -1, -1, -1},
+ { 0, 2, 4, 6, 8, -1, -1, -1},
+ {10, -1, -1, -1, -1, -1, -1, -1},
+ { 0, 10, -1, -1, -1, -1, -1, -1},
+ { 2, 10, -1, -1, -1, -1, -1, -1},
+ { 0, 2, 10, -1, -1, -1, -1, -1},
+ { 4, 10, -1, -1, -1, -1, -1, -1},
+ { 0, 4, 10, -1, -1, -1, -1, -1},
+ { 2, 4, 10, -1, -1, -1, -1, -1},
+ { 0, 2, 4, 10, -1, -1, -1, -1},
+ { 6, 10, -1, -1, -1, -1, -1, -1},
+ { 0, 6, 10, -1, -1, -1, -1, -1},
+ { 2, 6, 10, -1, -1, -1, -1, -1},
+ { 0, 2, 6, 10, -1, -1, -1, -1},
+ { 4, 6, 10, -1, -1, -1, -1, -1},
+ { 0, 4, 6, 10, -1, -1, -1, -1},
+ { 2, 4, 6, 10, -1, -1, -1, -1},
+ { 0, 2, 4, 6, 10, -1, -1, -1},
+ { 8, 10, -1, -1, -1, -1, -1, -1},
+ { 0, 8, 10, -1, -1, -1, -1, -1},
+ { 2, 8, 10, -1, -1, -1, -1, -1},
+ { 0, 2, 8, 10, -1, -1, -1, -1},
+ { 4, 8, 10, -1, -1, -1, -1, -1},
+ { 0, 4, 8, 10, -1, -1, -1, -1},
+ { 2, 4, 8, 10, -1, -1, -1, -1},
+ { 0, 2, 4, 8, 10, -1, -1, -1},
+ { 6, 8, 10, -1, -1, -1, -1, -1},
+ { 0, 6, 8, 10, -1, -1, -1, -1},
+ { 2, 6, 8, 10, -1, -1, -1, -1},
+ { 0, 2, 6, 8, 10, -1, -1, -1},
+ { 4, 6, 8, 10, -1, -1, -1, -1},
+ { 0, 4, 6, 8, 10, -1, -1, -1},
+ { 2, 4, 6, 8, 10, -1, -1, -1},
+ { 0, 2, 4, 6, 8, 10, -1, -1},
+ {12, -1, -1, -1, -1, -1, -1, -1},
+ { 0, 12, -1, -1, -1, -1, -1, -1},
+ { 2, 12, -1, -1, -1, -1, -1, -1},
+ { 0, 2, 12, -1, -1, -1, -1, -1},
+ { 4, 12, -1, -1, -1, -1, -1, -1},
+ { 0, 4, 12, -1, -1, -1, -1, -1},
+ { 2, 4, 12, -1, -1, -1, -1, -1},
+ { 0, 2, 4, 12, -1, -1, -1, -1},
+ { 6, 12, -1, -1, -1, -1, -1, -1},
+ { 0, 6, 12, -1, -1, -1, -1, -1},
+ { 2, 6, 12, -1, -1, -1, -1, -1},
+ { 0, 2, 6, 12, -1, -1, -1, -1},
+ { 4, 6, 12, -1, -1, -1, -1, -1},
+ { 0, 4, 6, 12, -1, -1, -1, -1},
+ { 2, 4, 6, 12, -1, -1, -1, -1},
+ { 0, 2, 4, 6, 12, -1, -1, -1},
+ { 8, 12, -1, -1, -1, -1, -1, -1},
+ { 0, 8, 12, -1, -1, -1, -1, -1},
+ { 2, 8, 12, -1, -1, -1, -1, -1},
+ { 0, 2, 8, 12, -1, -1, -1, -1},
+ { 4, 8, 12, -1, -1, -1, -1, -1},
+ { 0, 4, 8, 12, -1, -1, -1, -1},
+ { 2, 4, 8, 12, -1, -1, -1, -1},
+ { 0, 2, 4, 8, 12, -1, -1, -1},
+ { 6, 8, 12, -1, -1, -1, -1, -1},
+ { 0, 6, 8, 12, -1, -1, -1, -1},
+ { 2, 6, 8, 12, -1, -1, -1, -1},
+ { 0, 2, 6, 8, 12, -1, -1, -1},
+ { 4, 6, 8, 12, -1, -1, -1, -1},
+ { 0, 4, 6, 8, 12, -1, -1, -1},
+ { 2, 4, 6, 8, 12, -1, -1, -1},
+ { 0, 2, 4, 6, 8, 12, -1, -1},
+ {10, 12, -1, -1, -1, -1, -1, -1},
+ { 0, 10, 12, -1, -1, -1, -1, -1},
+ { 2, 10, 12, -1, -1, -1, -1, -1},
+ { 0, 2, 10, 12, -1, -1, -1, -1},
+ { 4, 10, 12, -1, -1, -1, -1, -1},
+ { 0, 4, 10, 12, -1, -1, -1, -1},
+ { 2, 4, 10, 12, -1, -1, -1, -1},
+ { 0, 2, 4, 10, 12, -1, -1, -1},
+ { 6, 10, 12, -1, -1, -1, -1, -1},
+ { 0, 6, 10, 12, -1, -1, -1, -1},
+ { 2, 6, 10, 12, -1, -1, -1, -1},
+ { 0, 2, 6, 10, 12, -1, -1, -1},
+ { 4, 6, 10, 12, -1, -1, -1, -1},
+ { 0, 4, 6, 10, 12, -1, -1, -1},
+ { 2, 4, 6, 10, 12, -1, -1, -1},
+ { 0, 2, 4, 6, 10, 12, -1, -1},
+ { 8, 10, 12, -1, -1, -1, -1, -1},
+ { 0, 8, 10, 12, -1, -1, -1, -1},
+ { 2, 8, 10, 12, -1, -1, -1, -1},
+ { 0, 2, 8, 10, 12, -1, -1, -1},
+ { 4, 8, 10, 12, -1, -1, -1, -1},
+ { 0, 4, 8, 10, 12, -1, -1, -1},
+ { 2, 4, 8, 10, 12, -1, -1, -1},
+ { 0, 2, 4, 8, 10, 12, -1, -1},
+ { 6, 8, 10, 12, -1, -1, -1, -1},
+ { 0, 6, 8, 10, 12, -1, -1, -1},
+ { 2, 6, 8, 10, 12, -1, -1, -1},
+ { 0, 2, 6, 8, 10, 12, -1, -1},
+ { 4, 6, 8, 10, 12, -1, -1, -1},
+ { 0, 4, 6, 8, 10, 12, -1, -1},
+ { 2, 4, 6, 8, 10, 12, -1, -1},
+ { 0, 2, 4, 6, 8, 10, 12, -1},
+ {14, -1, -1, -1, -1, -1, -1, -1},
+ { 0, 14, -1, -1, -1, -1, -1, -1},
+ { 2, 14, -1, -1, -1, -1, -1, -1},
+ { 0, 2, 14, -1, -1, -1, -1, -1},
+ { 4, 14, -1, -1, -1, -1, -1, -1},
+ { 0, 4, 14, -1, -1, -1, -1, -1},
+ { 2, 4, 14, -1, -1, -1, -1, -1},
+ { 0, 2, 4, 14, -1, -1, -1, -1},
+ { 6, 14, -1, -1, -1, -1, -1, -1},
+ { 0, 6, 14, -1, -1, -1, -1, -1},
+ { 2, 6, 14, -1, -1, -1, -1, -1},
+ { 0, 2, 6, 14, -1, -1, -1, -1},
+ { 4, 6, 14, -1, -1, -1, -1, -1},
+ { 0, 4, 6, 14, -1, -1, -1, -1},
+ { 2, 4, 6, 14, -1, -1, -1, -1},
+ { 0, 2, 4, 6, 14, -1, -1, -1},
+ { 8, 14, -1, -1, -1, -1, -1, -1},
+ { 0, 8, 14, -1, -1, -1, -1, -1},
+ { 2, 8, 14, -1, -1, -1, -1, -1},
+ { 0, 2, 8, 14, -1, -1, -1, -1},
+ { 4, 8, 14, -1, -1, -1, -1, -1},
+ { 0, 4, 8, 14, -1, -1, -1, -1},
+ { 2, 4, 8, 14, -1, -1, -1, -1},
+ { 0, 2, 4, 8, 14, -1, -1, -1},
+ { 6, 8, 14, -1, -1, -1, -1, -1},
+ { 0, 6, 8, 14, -1, -1, -1, -1},
+ { 2, 6, 8, 14, -1, -1, -1, -1},
+ { 0, 2, 6, 8, 14, -1, -1, -1},
+ { 4, 6, 8, 14, -1, -1, -1, -1},
+ { 0, 4, 6, 8, 14, -1, -1, -1},
+ { 2, 4, 6, 8, 14, -1, -1, -1},
+ { 0, 2, 4, 6, 8, 14, -1, -1},
+ {10, 14, -1, -1, -1, -1, -1, -1},
+ { 0, 10, 14, -1, -1, -1, -1, -1},
+ { 2, 10, 14, -1, -1, -1, -1, -1},
+ { 0, 2, 10, 14, -1, -1, -1, -1},
+ { 4, 10, 14, -1, -1, -1, -1, -1},
+ { 0, 4, 10, 14, -1, -1, -1, -1},
+ { 2, 4, 10, 14, -1, -1, -1, -1},
+ { 0, 2, 4, 10, 14, -1, -1, -1},
+ { 6, 10, 14, -1, -1, -1, -1, -1},
+ { 0, 6, 10, 14, -1, -1, -1, -1},
+ { 2, 6, 10, 14, -1, -1, -1, -1},
+ { 0, 2, 6, 10, 14, -1, -1, -1},
+ { 4, 6, 10, 14, -1, -1, -1, -1},
+ { 0, 4, 6, 10, 14, -1, -1, -1},
+ { 2, 4, 6, 10, 14, -1, -1, -1},
+ { 0, 2, 4, 6, 10, 14, -1, -1},
+ { 8, 10, 14, -1, -1, -1, -1, -1},
+ { 0, 8, 10, 14, -1, -1, -1, -1},
+ { 2, 8, 10, 14, -1, -1, -1, -1},
+ { 0, 2, 8, 10, 14, -1, -1, -1},
+ { 4, 8, 10, 14, -1, -1, -1, -1},
+ { 0, 4, 8, 10, 14, -1, -1, -1},
+ { 2, 4, 8, 10, 14, -1, -1, -1},
+ { 0, 2, 4, 8, 10, 14, -1, -1},
+ { 6, 8, 10, 14, -1, -1, -1, -1},
+ { 0, 6, 8, 10, 14, -1, -1, -1},
+ { 2, 6, 8, 10, 14, -1, -1, -1},
+ { 0, 2, 6, 8, 10, 14, -1, -1},
+ { 4, 6, 8, 10, 14, -1, -1, -1},
+ { 0, 4, 6, 8, 10, 14, -1, -1},
+ { 2, 4, 6, 8, 10, 14, -1, -1},
+ { 0, 2, 4, 6, 8, 10, 14, -1},
+ {12, 14, -1, -1, -1, -1, -1, -1},
+ { 0, 12, 14, -1, -1, -1, -1, -1},
+ { 2, 12, 14, -1, -1, -1, -1, -1},
+ { 0, 2, 12, 14, -1, -1, -1, -1},
+ { 4, 12, 14, -1, -1, -1, -1, -1},
+ { 0, 4, 12, 14, -1, -1, -1, -1},
+ { 2, 4, 12, 14, -1, -1, -1, -1},
+ { 0, 2, 4, 12, 14, -1, -1, -1},
+ { 6, 12, 14, -1, -1, -1, -1, -1},
+ { 0, 6, 12, 14, -1, -1, -1, -1},
+ { 2, 6, 12, 14, -1, -1, -1, -1},
+ { 0, 2, 6, 12, 14, -1, -1, -1},
+ { 4, 6, 12, 14, -1, -1, -1, -1},
+ { 0, 4, 6, 12, 14, -1, -1, -1},
+ { 2, 4, 6, 12, 14, -1, -1, -1},
+ { 0, 2, 4, 6, 12, 14, -1, -1},
+ { 8, 12, 14, -1, -1, -1, -1, -1},
+ { 0, 8, 12, 14, -1, -1, -1, -1},
+ { 2, 8, 12, 14, -1, -1, -1, -1},
+ { 0, 2, 8, 12, 14, -1, -1, -1},
+ { 4, 8, 12, 14, -1, -1, -1, -1},
+ { 0, 4, 8, 12, 14, -1, -1, -1},
+ { 2, 4, 8, 12, 14, -1, -1, -1},
+ { 0, 2, 4, 8, 12, 14, -1, -1},
+ { 6, 8, 12, 14, -1, -1, -1, -1},
+ { 0, 6, 8, 12, 14, -1, -1, -1},
+ { 2, 6, 8, 12, 14, -1, -1, -1},
+ { 0, 2, 6, 8, 12, 14, -1, -1},
+ { 4, 6, 8, 12, 14, -1, -1, -1},
+ { 0, 4, 6, 8, 12, 14, -1, -1},
+ { 2, 4, 6, 8, 12, 14, -1, -1},
+ { 0, 2, 4, 6, 8, 12, 14, -1},
+ {10, 12, 14, -1, -1, -1, -1, -1},
+ { 0, 10, 12, 14, -1, -1, -1, -1},
+ { 2, 10, 12, 14, -1, -1, -1, -1},
+ { 0, 2, 10, 12, 14, -1, -1, -1},
+ { 4, 10, 12, 14, -1, -1, -1, -1},
+ { 0, 4, 10, 12, 14, -1, -1, -1},
+ { 2, 4, 10, 12, 14, -1, -1, -1},
+ { 0, 2, 4, 10, 12, 14, -1, -1},
+ { 6, 10, 12, 14, -1, -1, -1, -1},
+ { 0, 6, 10, 12, 14, -1, -1, -1},
+ { 2, 6, 10, 12, 14, -1, -1, -1},
+ { 0, 2, 6, 10, 12, 14, -1, -1},
+ { 4, 6, 10, 12, 14, -1, -1, -1},
+ { 0, 4, 6, 10, 12, 14, -1, -1},
+ { 2, 4, 6, 10, 12, 14, -1, -1},
+ { 0, 2, 4, 6, 10, 12, 14, -1},
+ { 8, 10, 12, 14, -1, -1, -1, -1},
+ { 0, 8, 10, 12, 14, -1, -1, -1},
+ { 2, 8, 10, 12, 14, -1, -1, -1},
+ { 0, 2, 8, 10, 12, 14, -1, -1},
+ { 4, 8, 10, 12, 14, -1, -1, -1},
+ { 0, 4, 8, 10, 12, 14, -1, -1},
+ { 2, 4, 8, 10, 12, 14, -1, -1},
+ { 0, 2, 4, 8, 10, 12, 14, -1},
+ { 6, 8, 10, 12, 14, -1, -1, -1},
+ { 0, 6, 8, 10, 12, 14, -1, -1},
+ { 2, 6, 8, 10, 12, 14, -1, -1},
+ { 0, 2, 6, 8, 10, 12, 14, -1},
+ { 4, 6, 8, 10, 12, 14, -1, -1},
+ { 0, 4, 6, 8, 10, 12, 14, -1},
+ { 2, 4, 6, 8, 10, 12, 14, -1},
+ { 0, 2, 4, 6, 8, 10, 12, 14}
+};
+#endif
+
+#define _mm256_cmpge_epu16(a, b) _mm256_cmpeq_epi16(_mm256_max_epu16(a, b), a)
+#define _mm_cmpge_epu16(a, b) _mm_cmpeq_epi16(_mm_max_epu16(a, b), a)
+
+unsigned int rej_uniform_avx2(int16_t * restrict r, const uint8_t *buf)
+{
+ unsigned int ctr, pos;
+ uint16_t val0, val1;
+ uint32_t good;
+#ifdef BMI
+ uint64_t idx0, idx1, idx2, idx3;
+#endif
+ const __m256i bound = _mm256_load_si256(&qdata.vec[_16XQ/16]);
+ const __m256i ones = _mm256_set1_epi8(1);
+ const __m256i mask = _mm256_set1_epi16(0xFFF);
+ const __m256i idx8 = _mm256_set_epi8(15,14,14,13,12,11,11,10,
+ 9, 8, 8, 7, 6, 5, 5, 4,
+ 11,10,10, 9, 8, 7, 7, 6,
+ 5, 4, 4, 3, 2, 1, 1, 0);
+ __m256i f0, f1, g0, g1, g2, g3;
+ __m128i f, t, pilo, pihi;
+
+ ctr = pos = 0;
+ while(ctr <= S2N_KYBER_512_R3_N - 32 && pos <= S2N_KYBER_512_R3_REJ_UNIFORM_AVX_BUFLEN - 48) {
+ // correcting cast-align and cast-qual errors
+ // old version: f0 = _mm256_loadu_si256((__m256i *)&buf[pos]);
+ f0 = _mm256_loadu_si256((const void *)&buf[pos]);
+ // old version: f1 = _mm256_loadu_si256((__m256i *)&buf[pos+24]);
+ f1 = _mm256_loadu_si256((const void *)&buf[pos+24]);
+ f0 = _mm256_permute4x64_epi64(f0, 0x94);
+ f1 = _mm256_permute4x64_epi64(f1, 0x94);
+ f0 = _mm256_shuffle_epi8(f0, idx8);
+ f1 = _mm256_shuffle_epi8(f1, idx8);
+ g0 = _mm256_srli_epi16(f0, 4);
+ g1 = _mm256_srli_epi16(f1, 4);
+ f0 = _mm256_blend_epi16(f0, g0, 0xAA);
+ f1 = _mm256_blend_epi16(f1, g1, 0xAA);
+ f0 = _mm256_and_si256(f0, mask);
+ f1 = _mm256_and_si256(f1, mask);
+ pos += 48;
+
+ g0 = _mm256_cmpgt_epi16(bound, f0);
+ g1 = _mm256_cmpgt_epi16(bound, f1);
+
+ g0 = _mm256_packs_epi16(g0, g1);
+ good = _mm256_movemask_epi8(g0);
+
+#ifdef BMI
+ idx0 = _pdep_u64(good >> 0, 0x0101010101010101);
+ idx1 = _pdep_u64(good >> 8, 0x0101010101010101);
+ idx2 = _pdep_u64(good >> 16, 0x0101010101010101);
+ idx3 = _pdep_u64(good >> 24, 0x0101010101010101);
+ idx0 = (idx0 << 8) - idx0;
+ idx0 = _pext_u64(0x0E0C0A0806040200, idx0);
+ idx1 = (idx1 << 8) - idx1;
+ idx1 = _pext_u64(0x0E0C0A0806040200, idx1);
+ idx2 = (idx2 << 8) - idx2;
+ idx2 = _pext_u64(0x0E0C0A0806040200, idx2);
+ idx3 = (idx3 << 8) - idx3;
+ idx3 = _pext_u64(0x0E0C0A0806040200, idx3);
+
+ g0 = _mm256_castsi128_si256(_mm_cvtsi64_si128(idx0));
+ g1 = _mm256_castsi128_si256(_mm_cvtsi64_si128(idx1));
+ g0 = _mm256_inserti128_si256(g0, _mm_cvtsi64_si128(idx2), 1);
+ g1 = _mm256_inserti128_si256(g1, _mm_cvtsi64_si128(idx3), 1);
+#else
+ // correcting cast-align and cast-qual errors
+ // old version: g0 = _mm256_castsi128_si256(_mm_loadl_epi64((__m128i *)&idx[(good >> 0) & 0xFF]));
+ g0 = _mm256_castsi128_si256(_mm_loadl_epi64((const void *)&idx[(good >> 0) & 0xFF]));
+ // old version: g1 = _mm256_castsi128_si256(_mm_loadl_epi64((__m128i *)&idx[(good >> 8) & 0xFF]));
+ g1 = _mm256_castsi128_si256(_mm_loadl_epi64((const void *)&idx[(good >> 8) & 0xFF]));
+ // old version: g0 = _mm256_inserti128_si256(g0, _mm_loadl_epi64((__m128i *)&idx[(good >> 16) & 0xFF]), 1);
+ g0 = _mm256_inserti128_si256(g0, _mm_loadl_epi64((const void *)&idx[(good >> 16) & 0xFF]), 1);
+ // old version: g1 = _mm256_inserti128_si256(g1, _mm_loadl_epi64((__m128i *)&idx[(good >> 24) & 0xFF]), 1);
+ g1 = _mm256_inserti128_si256(g1, _mm_loadl_epi64((const void *)&idx[(good >> 24) & 0xFF]), 1);
+#endif
+
+ g2 = _mm256_add_epi8(g0, ones);
+ g3 = _mm256_add_epi8(g1, ones);
+ g0 = _mm256_unpacklo_epi8(g0, g2);
+ g1 = _mm256_unpacklo_epi8(g1, g3);
+
+ f0 = _mm256_shuffle_epi8(f0, g0);
+ f1 = _mm256_shuffle_epi8(f1, g1);
+
+ // correcting cast-align errors
+ // old version: _mm_storeu_si128((__m128i *)&r[ctr], _mm256_castsi256_si128(f0));
+ _mm_storeu_si128((void *)&r[ctr], _mm256_castsi256_si128(f0));
+ ctr += _mm_popcnt_u32((good >> 0) & 0xFF);
+ // old version: _mm_storeu_si128((__m128i *)&r[ctr], _mm256_extracti128_si256(f0, 1));
+ _mm_storeu_si128((void *)&r[ctr], _mm256_extracti128_si256(f0, 1));
+ ctr += _mm_popcnt_u32((good >> 16) & 0xFF);
+ // old version: _mm_storeu_si128((__m128i *)&r[ctr], _mm256_castsi256_si128(f1));
+ _mm_storeu_si128((void *)&r[ctr], _mm256_castsi256_si128(f1));
+ ctr += _mm_popcnt_u32((good >> 8) & 0xFF);
+ // old version: _mm_storeu_si128((__m128i *)&r[ctr], _mm256_extracti128_si256(f1, 1));
+ _mm_storeu_si128((void *)&r[ctr], _mm256_extracti128_si256(f1, 1));
+ ctr += _mm_popcnt_u32((good >> 24) & 0xFF);
+ }
+
+ while(ctr <= S2N_KYBER_512_R3_N - 8 && pos <= S2N_KYBER_512_R3_REJ_UNIFORM_AVX_BUFLEN - 12) {
+ // correcting cast-align and cast-qual errors
+ // old version: f = _mm_loadu_si128((__m128i *)&buf[pos]);
+ f = _mm_loadu_si128((const void *)&buf[pos]);
+ f = _mm_shuffle_epi8(f, _mm256_castsi256_si128(idx8));
+ t = _mm_srli_epi16(f, 4);
+ f = _mm_blend_epi16(f, t, 0xAA);
+ f = _mm_and_si128(f, _mm256_castsi256_si128(mask));
+ pos += 12;
+
+ t = _mm_cmpgt_epi16(_mm256_castsi256_si128(bound), f);
+ good = _mm_movemask_epi8(t);
+
+#ifdef BMI
+ good &= 0x5555;
+ idx0 = _pdep_u64(good, 0x1111111111111111);
+ idx0 = (idx0 << 8) - idx0;
+ idx0 = _pext_u64(0x0E0C0A0806040200, idx0);
+ pilo = _mm_cvtsi64_si128(idx0);
+#else
+ good = _pext_u32(good, 0x5555);
+ // correcting cast-align and cast-qual errors
+ // old version: pilo = _mm_loadl_epi64((__m128i *)&idx[good]);
+ pilo = _mm_loadl_epi64((const void *)&idx[good]);
+#endif
+
+ pihi = _mm_add_epi8(pilo, _mm256_castsi256_si128(ones));
+ pilo = _mm_unpacklo_epi8(pilo, pihi);
+ f = _mm_shuffle_epi8(f, pilo);
+ // correcting cast-align error
+ // old version: _mm_storeu_si128((__m128i *)&r[ctr], f);
+ _mm_storeu_si128((void *)&r[ctr], f);
+ ctr += _mm_popcnt_u32(good);
+ }
+
+ while(ctr < S2N_KYBER_512_R3_N && pos <= S2N_KYBER_512_R3_REJ_UNIFORM_AVX_BUFLEN - 3) {
+ val0 = ((buf[pos+0] >> 0) | ((uint16_t)buf[pos+1] << 8)) & 0xFFF;
+ val1 = ((buf[pos+1] >> 4) | ((uint16_t)buf[pos+2] << 4));
+ pos += 3;
+
+ if(val0 < S2N_KYBER_512_R3_Q)
+ r[ctr++] = val0;
+ if(val1 < S2N_KYBER_512_R3_Q && ctr < S2N_KYBER_512_R3_N)
+ r[ctr++] = val1;
+ }
+
+ return ctr;
+}
+#endif
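
For reference, each row of the 256-entry idx[] table used in the non-BMI path lists, for one 8-bit acceptance mask, the byte offsets of the accepted 16-bit lanes padded with -1; _mm256_shuffle_epi8 then uses those offsets to compact accepted coefficients to the front of each 128-bit lane. A standalone sketch (not part of the patch, illustrative names) that derives one such row:

#include <stdint.h>
#include <stdio.h>

/* Derive the idx[] row for an 8-bit acceptance mask: bit i set means 16-bit
 * lane i passed the "< q" comparison, so its low-byte offset 2*i is appended;
 * remaining slots are padded with -1. */
static void make_idx_row(unsigned mask, int8_t row[8]) {
    unsigned n = 0;
    for (unsigned i = 0; i < 8; i++) {
        if (mask & (1u << i)) {
            row[n++] = (int8_t)(2 * i);
        }
    }
    while (n < 8) {
        row[n++] = -1;
    }
}

int main(void) {
    int8_t row[8];
    make_idx_row(0x05, row); /* lanes 0 and 2 accepted -> { 0, 4, -1, ... } */
    for (unsigned i = 0; i < 8; i++) {
        printf("%4d", row[i]);
    }
    printf("\n");
    return 0;
}
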
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.h
new file mode 100644
index 0000000000..bd8a970464
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_rejsample_avx2.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <stdint.h>
+#include "kyber512r3_params.h"
+#include "kyber512r3_fips202.h"
+
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+#define S2N_KYBER_512_R3_XOF_BLOCKBYTES S2N_KYBER_512_R3_SHAKE128_RATE
+#define S2N_KYBER_512_R3_REJ_UNIFORM_AVX_NBLOCKS ((12*S2N_KYBER_512_R3_N/8*(1 << 12)/S2N_KYBER_512_R3_Q + S2N_KYBER_512_R3_XOF_BLOCKBYTES)/S2N_KYBER_512_R3_XOF_BLOCKBYTES)
+#define S2N_KYBER_512_R3_REJ_UNIFORM_AVX_BUFLEN (S2N_KYBER_512_R3_REJ_UNIFORM_AVX_NBLOCKS*S2N_KYBER_512_R3_XOF_BLOCKBYTES)
+
+#define rej_uniform_avx2 S2N_KYBER_512_R3_NAMESPACE(rej_uniform_avx2)
+unsigned int rej_uniform_avx2(int16_t *r, const uint8_t *buf);
+#endif
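
With the standard Kyber-512 parameters (n = 256 and q = 3329 from kyber512r3_params.h) and the SHAKE128 rate of 168 bytes, the integer arithmetic above works out as: 12*256/8 = 384 bytes of 12-bit candidates, scaled by the inverse acceptance rate 4096/3329 to 472, then (472 + 168)/168 = 3 blocks, so S2N_KYBER_512_R3_REJ_UNIFORM_AVX_BUFLEN is 3*168 = 504 bytes, which the sampling loops in kyber512r3_rejsample_avx2.c consume in 48-, 12-, and 3-byte steps.
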
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_shuffle_avx2.S b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_shuffle_avx2.S
new file mode 100644
index 0000000000..ce7200e5ca
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_shuffle_avx2.S
@@ -0,0 +1,272 @@
+#include "kyber512r3_consts_avx2.h"
+
+// The small macros (.inc files) are combined with .S files directly
+/*****.include "fq.inc"*****/
+/***************************/
+.macro red16 r,rs=0,x=12
+vpmulhw %ymm1,%ymm\r,%ymm\x
+.if \rs
+vpmulhrsw %ymm\rs,%ymm\x,%ymm\x
+.else
+vpsraw $10,%ymm\x,%ymm\x
+.endif
+vpmullw %ymm0,%ymm\x,%ymm\x
+vpsubw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro csubq r,x=12
+vpsubw %ymm0,%ymm\r,%ymm\r
+vpsraw $15,%ymm\r,%ymm\x
+vpand %ymm0,%ymm\x,%ymm\x
+vpaddw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro caddq r,x=12
+vpsraw $15,%ymm\r,%ymm\x
+vpand %ymm0,%ymm\x,%ymm\x
+vpaddw %ymm\x,%ymm\r,%ymm\r
+.endm
+
+.macro fqmulprecomp al,ah,b,x=12
+vpmullw %ymm\al,%ymm\b,%ymm\x
+vpmulhw %ymm\ah,%ymm\b,%ymm\b
+vpmulhw %ymm0,%ymm\x,%ymm\x
+vpsubw %ymm\x,%ymm\b,%ymm\b
+.endm
+/***************************/
+
+/*****.include "shuffle.inc"*****/
+/********************************/
+.macro shuffle8 r0,r1,r2,r3
+vperm2i128 $0x20,%ymm\r1,%ymm\r0,%ymm\r2
+vperm2i128 $0x31,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle4 r0,r1,r2,r3
+vpunpcklqdq %ymm\r1,%ymm\r0,%ymm\r2
+vpunpckhqdq %ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle2 r0,r1,r2,r3
+#vpsllq $32,%ymm\r1,%ymm\r2
+vmovsldup %ymm\r1,%ymm\r2
+vpblendd $0xAA,%ymm\r2,%ymm\r0,%ymm\r2
+vpsrlq $32,%ymm\r0,%ymm\r0
+#vmovshdup %ymm\r0,%ymm\r0
+vpblendd $0xAA,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+
+.macro shuffle1 r0,r1,r2,r3
+vpslld $16,%ymm\r1,%ymm\r2
+vpblendw $0xAA,%ymm\r2,%ymm\r0,%ymm\r2
+vpsrld $16,%ymm\r0,%ymm\r0
+vpblendw $0xAA,%ymm\r1,%ymm\r0,%ymm\r3
+.endm
+/********************************/
+
+.text
+nttunpack128_avx:
+#load
+vmovdqa (%rdi),%ymm4
+vmovdqa 32(%rdi),%ymm5
+vmovdqa 64(%rdi),%ymm6
+vmovdqa 96(%rdi),%ymm7
+vmovdqa 128(%rdi),%ymm8
+vmovdqa 160(%rdi),%ymm9
+vmovdqa 192(%rdi),%ymm10
+vmovdqa 224(%rdi),%ymm11
+
+shuffle8 4,8,3,8
+shuffle8 5,9,4,9
+shuffle8 6,10,5,10
+shuffle8 7,11,6,11
+
+shuffle4 3,5,7,5
+shuffle4 8,10,3,10
+shuffle4 4,6,8,6
+shuffle4 9,11,4,11
+
+shuffle2 7,8,9,8
+shuffle2 5,6,7,6
+shuffle2 3,4,5,4
+shuffle2 10,11,3,11
+
+shuffle1 9,5,10,5
+shuffle1 8,4,9,4
+shuffle1 7,3,8,3
+shuffle1 6,11,7,11
+
+#store
+vmovdqa %ymm10,(%rdi)
+vmovdqa %ymm5,32(%rdi)
+vmovdqa %ymm9,64(%rdi)
+vmovdqa %ymm4,96(%rdi)
+vmovdqa %ymm8,128(%rdi)
+vmovdqa %ymm3,160(%rdi)
+vmovdqa %ymm7,192(%rdi)
+vmovdqa %ymm11,224(%rdi)
+
+ret
+
+.global cdecl(nttunpack_avx2_asm)
+cdecl(nttunpack_avx2_asm):
+call nttunpack128_avx
+add $256,%rdi
+call nttunpack128_avx
+ret
+
+ntttobytes128_avx:
+#load
+vmovdqa (%rsi),%ymm5
+vmovdqa 32(%rsi),%ymm6
+vmovdqa 64(%rsi),%ymm7
+vmovdqa 96(%rsi),%ymm8
+vmovdqa 128(%rsi),%ymm9
+vmovdqa 160(%rsi),%ymm10
+vmovdqa 192(%rsi),%ymm11
+vmovdqa 224(%rsi),%ymm12
+
+#csubq
+csubq 5,13
+csubq 6,13
+csubq 7,13
+csubq 8,13
+csubq 9,13
+csubq 10,13
+csubq 11,13
+csubq 12,13
+
+#bitpack
+vpsllw $12,%ymm6,%ymm4
+vpor %ymm4,%ymm5,%ymm4
+
+vpsrlw $4,%ymm6,%ymm5
+vpsllw $8,%ymm7,%ymm6
+vpor %ymm5,%ymm6,%ymm5
+
+vpsrlw $8,%ymm7,%ymm6
+vpsllw $4,%ymm8,%ymm7
+vpor %ymm6,%ymm7,%ymm6
+
+vpsllw $12,%ymm10,%ymm7
+vpor %ymm7,%ymm9,%ymm7
+
+vpsrlw $4,%ymm10,%ymm8
+vpsllw $8,%ymm11,%ymm9
+vpor %ymm8,%ymm9,%ymm8
+
+vpsrlw $8,%ymm11,%ymm9
+vpsllw $4,%ymm12,%ymm10
+vpor %ymm9,%ymm10,%ymm9
+
+shuffle1 4,5,3,5
+shuffle1 6,7,4,7
+shuffle1 8,9,6,9
+
+shuffle2 3,4,8,4
+shuffle2 6,5,3,5
+shuffle2 7,9,6,9
+
+shuffle4 8,3,7,3
+shuffle4 6,4,8,4
+shuffle4 5,9,6,9
+
+shuffle8 7,8,5,8
+shuffle8 6,3,7,3
+shuffle8 4,9,6,9
+
+#store
+vmovdqu %ymm5,(%rdi)
+vmovdqu %ymm7,32(%rdi)
+vmovdqu %ymm6,64(%rdi)
+vmovdqu %ymm8,96(%rdi)
+vmovdqu %ymm3,128(%rdi)
+vmovdqu %ymm9,160(%rdi)
+
+ret
+
+.global cdecl(ntttobytes_avx2_asm)
+cdecl(ntttobytes_avx2_asm):
+#consts
+vmovdqa _16XQ*2(%rdx),%ymm0
+call ntttobytes128_avx
+add $256,%rsi
+add $192,%rdi
+call ntttobytes128_avx
+ret
+
+nttfrombytes128_avx:
+#load
+vmovdqu (%rsi),%ymm4
+vmovdqu 32(%rsi),%ymm5
+vmovdqu 64(%rsi),%ymm6
+vmovdqu 96(%rsi),%ymm7
+vmovdqu 128(%rsi),%ymm8
+vmovdqu 160(%rsi),%ymm9
+
+shuffle8 4,7,3,7
+shuffle8 5,8,4,8
+shuffle8 6,9,5,9
+
+shuffle4 3,8,6,8
+shuffle4 7,5,3,5
+shuffle4 4,9,7,9
+
+shuffle2 6,5,4,5
+shuffle2 8,7,6,7
+shuffle2 3,9,8,9
+
+shuffle1 4,7,10,7
+shuffle1 5,8,4,8
+shuffle1 6,9,5,9
+
+#bitunpack
+vpsrlw $12,%ymm10,%ymm11
+vpsllw $4,%ymm7,%ymm12
+vpor %ymm11,%ymm12,%ymm11
+vpand %ymm0,%ymm10,%ymm10
+vpand %ymm0,%ymm11,%ymm11
+
+vpsrlw $8,%ymm7,%ymm12
+vpsllw $8,%ymm4,%ymm13
+vpor %ymm12,%ymm13,%ymm12
+vpand %ymm0,%ymm12,%ymm12
+
+vpsrlw $4,%ymm4,%ymm13
+vpand %ymm0,%ymm13,%ymm13
+
+vpsrlw $12,%ymm8,%ymm14
+vpsllw $4,%ymm5,%ymm15
+vpor %ymm14,%ymm15,%ymm14
+vpand %ymm0,%ymm8,%ymm8
+vpand %ymm0,%ymm14,%ymm14
+
+vpsrlw $8,%ymm5,%ymm15
+vpsllw $8,%ymm9,%ymm1
+vpor %ymm15,%ymm1,%ymm15
+vpand %ymm0,%ymm15,%ymm15
+
+vpsrlw $4,%ymm9,%ymm1
+vpand %ymm0,%ymm1,%ymm1
+
+#store
+vmovdqa %ymm10,(%rdi)
+vmovdqa %ymm11,32(%rdi)
+vmovdqa %ymm12,64(%rdi)
+vmovdqa %ymm13,96(%rdi)
+vmovdqa %ymm8,128(%rdi)
+vmovdqa %ymm14,160(%rdi)
+vmovdqa %ymm15,192(%rdi)
+vmovdqa %ymm1,224(%rdi)
+
+ret
+
+.global cdecl(nttfrombytes_avx2_asm)
+cdecl(nttfrombytes_avx2_asm):
+#consts
+vmovdqa _16XMASK*2(%rdx),%ymm0
+call nttfrombytes128_avx
+add $256,%rdi
+add $192,%rsi
+call nttfrombytes128_avx
+ret
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric-shake.c b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric-shake.c
new file mode 100644
index 0000000000..390a2a4e38
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric-shake.c
@@ -0,0 +1,49 @@
+#include "kyber512r3_params.h"
+#include "kyber512r3_fips202.h"
+#include "kyber512r3_symmetric.h"
+#include <stdlib.h>
+
+/*************************************************
+* Name: kyber_shake128_absorb
+*
+* Description: Absorb step of the SHAKE128 specialized for the Kyber context.
+*
+* Arguments: - keccak_state *s: pointer to (uninitialized) output Keccak state
+* - const uint8_t *input: pointer to S2N_KYBER_512_R3_SYMBYTES input to be absorbed into s
+* - uint8_t x: additional byte of input
+* - uint8_t y: additional byte of input
+**************************************************/
+void kyber_shake128_absorb(keccak_state *s, const uint8_t *input, uint8_t x, uint8_t y) {
+ size_t i;
+ uint8_t extseed[S2N_KYBER_512_R3_SYMBYTES + 2];
+
+ for (i = 0; i < S2N_KYBER_512_R3_SYMBYTES; i++) {
+ extseed[i] = input[i];
+ }
+ extseed[i++] = x;
+ extseed[i] = y;
+ shake128_absorb(s, extseed, S2N_KYBER_512_R3_SYMBYTES + 2);
+}
+
+/*************************************************
+* Name: shake256_prf
+*
+* Description: Usage of SHAKE256 as a PRF, concatenates secret and public input
+* and then generates outlen bytes of SHAKE256 output
+*
+* Arguments: - uint8_t *output: pointer to output
+* - size_t outlen: number of requested output bytes
+* - const uint8_t * key: pointer to the key (of length S2N_KYBER_512_R3_SYMBYTES)
+* - uint8_t nonce: single-byte nonce (public PRF input)
+**************************************************/
+void shake256_prf(uint8_t *output, size_t outlen, const uint8_t *key, uint8_t nonce) {
+ uint8_t extkey[S2N_KYBER_512_R3_SYMBYTES + 1];
+ size_t i;
+
+ for (i = 0; i < S2N_KYBER_512_R3_SYMBYTES; i++) {
+ extkey[i] = key[i];
+ }
+ extkey[i] = nonce;
+
+ shake256(output, outlen, extkey, S2N_KYBER_512_R3_SYMBYTES + 1);
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric.h b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric.h
new file mode 100644
index 0000000000..e898a29450
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/kyber_r3/kyber512r3_symmetric.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include "kyber512r3_params.h"
+#include "kyber512r3_fips202.h"
+#include <stdint.h>
+
+#define keccak_state S2N_KYBER_512_R3_NAMESPACE(keccak_state)
+typedef shake128ctx keccak_state;
+
+#define xof_state S2N_KYBER_512_R3_NAMESPACE(xof_state)
+typedef keccak_state xof_state;
+
+#define kyber_shake128_absorb S2N_KYBER_512_R3_NAMESPACE(kyber_shake128_absorb)
+void kyber_shake128_absorb(keccak_state *s, const uint8_t *input, uint8_t x, uint8_t y);
+
+#define shake256_prf S2N_KYBER_512_R3_NAMESPACE(shake256_prf)
+void shake256_prf(uint8_t *output, size_t outlen, const uint8_t *key, uint8_t nonce);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.c b/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.c
index 7381deed4e..8eda65be59 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.c
@@ -14,13 +14,23 @@
*/
#include "s2n_pq.h"
+#include "crypto/s2n_openssl.h"
-static bool sikep434r2_asm_enabled = false;
+static bool sikep434r3_asm_enabled = false;
+
+/* BIKE Round-3 code supports several levels of optimization */
+static bool bike_r3_avx2_enabled = false;
+static bool bike_r3_avx512_enabled = false;
+static bool bike_r3_pclmul_enabled = false;
+static bool bike_r3_vpclmul_enabled = false;
+
+static bool kyber512r3_avx2_bmi2_enabled = false;
#if defined(S2N_CPUID_AVAILABLE)
/* https://en.wikipedia.org/wiki/CPUID */
#include <cpuid.h>
+#define PROCESSOR_INFO_AND_FEATURES 1
#define EXTENDED_FEATURES_LEAF 7
#define EXTENDED_FEATURES_SUBLEAF_ZERO 0
@@ -35,6 +45,12 @@ static bool sikep434r2_asm_enabled = false;
#define bit_BMI2 (1 << 8)
#endif
+/* BIKE related CPU features */
+#define EBX_BIT_AVX2 (1 << 5)
+#define EBX_BIT_AVX512 (1 << 16)
+#define ECX_BIT_VPCLMUL (1 << 10)
+#define ECX_BIT_PCLMUL (1 << 1)
+
bool s2n_get_cpuid_count(uint32_t leaf, uint32_t sub_leaf, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) {
/* 0x80000000 probes for extended cpuid info */
uint32_t max_level = __get_cpuid_max(leaf & 0x80000000, 0);
@@ -67,56 +83,228 @@ bool s2n_cpu_supports_adx() {
return (ebx & bit_ADX);
}
-bool s2n_cpu_supports_sikep434r2_asm() {
-#if defined(S2N_SIKEP434R2_ASM)
- /* The sikep434r2 assembly code always requires BMI2. If the assembly
+bool s2n_cpu_supports_avx2() {
+ uint32_t eax, ebx, ecx, edx;
+ if (!s2n_get_cpuid_count(EXTENDED_FEATURES_LEAF, EXTENDED_FEATURES_SUBLEAF_ZERO, &eax, &ebx, &ecx, &edx)) {
+ return false;
+ }
+
+ return (ebx & EBX_BIT_AVX2);
+}
+
+bool s2n_cpu_supports_sikep434r3_asm() {
+#if defined(S2N_SIKE_P434_R3_ASM)
+ /* The sikep434r3 assembly code always requires BMI2. If the assembly
* was compiled with support for ADX, we also require ADX at runtime. */
- #if defined(S2N_ADX)
- return s2n_cpu_supports_bmi2() && s2n_cpu_supports_adx();
- #else
- return s2n_cpu_supports_bmi2();
- #endif
+#if defined(S2N_ADX)
+ return s2n_cpu_supports_bmi2() && s2n_cpu_supports_adx();
+#else
+ return s2n_cpu_supports_bmi2();
+#endif
+#else
+ /* sikep434r3 assembly was not supported at compile time */
+ return false;
+#endif /* defined(S2N_SIKE_P434_R3_ASM) */
+}
+
+bool s2n_cpu_supports_bike_r3_avx2() {
+#if defined(S2N_BIKE_R3_AVX2)
+ uint32_t eax, ebx, ecx, edx;
+ if (!s2n_get_cpuid_count(EXTENDED_FEATURES_LEAF, EXTENDED_FEATURES_SUBLEAF_ZERO, &eax, &ebx, &ecx, &edx)) {
+ return false;
+ }
+ return ((ebx & EBX_BIT_AVX2) != 0);
+#else
+ return false;
+#endif
+}
+
+bool s2n_cpu_supports_bike_r3_avx512() {
+#if defined(S2N_BIKE_R3_AVX512)
+ uint32_t eax, ebx, ecx, edx;
+ if (!s2n_get_cpuid_count(EXTENDED_FEATURES_LEAF, EXTENDED_FEATURES_SUBLEAF_ZERO, &eax, &ebx, &ecx, &edx)) {
+ return false;
+ }
+ return ((ebx & EBX_BIT_AVX512) != 0);
+#else
+ return false;
+#endif
+}
+
+bool s2n_cpu_supports_bike_r3_pclmul() {
+#if defined(S2N_BIKE_R3_PCLMUL)
+ uint32_t eax, ebx, ecx, edx;
+ if (!s2n_get_cpuid_count(PROCESSOR_INFO_AND_FEATURES, EXTENDED_FEATURES_SUBLEAF_ZERO, &eax, &ebx, &ecx, &edx)) {
+ return false;
+ }
+ return ((ecx & ECX_BIT_PCLMUL) != 0);
#else
- /* sikep434r2 assembly was not supported at compile time */
return false;
-#endif /* defined(S2N_SIKEP434R2_ASM) */
+#endif
+}
+
+bool s2n_cpu_supports_bike_r3_vpclmul() {
+#if defined(S2N_BIKE_R3_AVX512)
+ uint32_t eax, ebx, ecx, edx;
+ if (!s2n_get_cpuid_count(EXTENDED_FEATURES_LEAF, EXTENDED_FEATURES_SUBLEAF_ZERO, &eax, &ebx, &ecx, &edx)) {
+ return false;
+ }
+ return ((ecx & ECX_BIT_VPCLMUL) != 0);
+#else
+ return false;
+#endif
+}
+
+bool s2n_cpu_supports_kyber512r3_avx2_bmi2() {
+#if defined(S2N_KYBER512R3_AVX2_BMI2)
+ return s2n_cpu_supports_bmi2() && s2n_cpu_supports_avx2();
+#else
+ return false;
+#endif
}
#else /* defined(S2N_CPUID_AVAILABLE) */
/* If CPUID is not available, we cannot perform necessary run-time checks. */
-bool s2n_cpu_supports_sikep434r2_asm() {
+bool s2n_cpu_supports_sikep434r3_asm() {
+ return false;
+}
+
+bool s2n_cpu_supports_bike_r3_avx2() {
+ return false;
+}
+
+bool s2n_cpu_supports_bike_r3_avx512() {
+ return false;
+}
+
+bool s2n_cpu_supports_bike_r3_pclmul() {
+ return false;
+}
+
+bool s2n_cpu_supports_bike_r3_vpclmul() {
+ return false;
+}
+
+bool s2n_cpu_supports_kyber512r3_avx2_bmi2() {
return false;
}
#endif /* defined(S2N_CPUID_AVAILABLE) */
-bool s2n_sikep434r2_asm_is_enabled() {
- return sikep434r2_asm_enabled;
+bool s2n_sikep434r3_asm_is_enabled() {
+ return sikep434r3_asm_enabled;
+}
+
+bool s2n_bike_r3_is_avx2_enabled() {
+ return bike_r3_avx2_enabled;
+}
+
+bool s2n_bike_r3_is_avx512_enabled() {
+ return bike_r3_avx512_enabled;
+}
+
+bool s2n_bike_r3_is_pclmul_enabled() {
+ return bike_r3_pclmul_enabled;
+}
+
+bool s2n_bike_r3_is_vpclmul_enabled() {
+ return bike_r3_vpclmul_enabled;
+}
+
+bool s2n_kyber512r3_is_avx2_bmi2_enabled() {
+ return kyber512r3_avx2_bmi2_enabled;
}
bool s2n_pq_is_enabled() {
#if defined(S2N_NO_PQ)
return false;
#else
- return !s2n_is_in_fips_mode();
+ /* aws-lc is currently the only supported FIPS library known to support PQ. */
+ return s2n_libcrypto_is_awslc() || (!s2n_is_in_fips_mode());
#endif
}
-S2N_RESULT s2n_disable_sikep434r2_asm() {
- sikep434r2_asm_enabled = false;
+S2N_RESULT s2n_disable_sikep434r3_asm() {
+ sikep434r3_asm_enabled = false;
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_disable_bike_r3_opt_all() {
+ bike_r3_avx2_enabled = false;
+ bike_r3_avx512_enabled = false;
+ bike_r3_pclmul_enabled = false;
+ bike_r3_vpclmul_enabled = false;
return S2N_RESULT_OK;
}
-S2N_RESULT s2n_try_enable_sikep434r2_asm() {
- if (s2n_pq_is_enabled() && s2n_cpu_supports_sikep434r2_asm()) {
- sikep434r2_asm_enabled = true;
+S2N_RESULT s2n_disable_kyber512r3_opt_avx2_bmi2() {
+ kyber512r3_avx2_bmi2_enabled = false;
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_try_enable_bike_r3_opt_pclmul() {
+ if (s2n_pq_is_enabled() && s2n_cpu_supports_bike_r3_pclmul()) {
+ bike_r3_pclmul_enabled = true;
}
return S2N_RESULT_OK;
}
-S2N_RESULT s2n_pq_init() {
- ENSURE_OK(s2n_try_enable_sikep434r2_asm(), S2N_ERR_SAFETY);
+S2N_RESULT s2n_try_enable_bike_r3_opt_avx2() {
+ /* When AVX2 is available, PCLMUL is too by default. */
+ RESULT_ENSURE_OK(s2n_try_enable_bike_r3_opt_pclmul(), S2N_ERR_SAFETY);
+ if (s2n_pq_is_enabled() && s2n_cpu_supports_bike_r3_avx2()) {
+ bike_r3_avx2_enabled = true;
+ }
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_try_enable_bike_r3_opt_avx512() {
+ /* When AVX512 is available, AVX2 is too by default. */
+ RESULT_ENSURE_OK(s2n_try_enable_bike_r3_opt_avx2(), S2N_ERR_SAFETY);
+ if (s2n_pq_is_enabled() && s2n_cpu_supports_bike_r3_avx512()) {
+ bike_r3_avx512_enabled = true;
+ }
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_try_enable_bike_r3_opt_vpclmul() {
+ RESULT_ENSURE_OK(s2n_try_enable_bike_r3_opt_avx512(), S2N_ERR_SAFETY);
+ /* Only enable VPCLMUL if AVX512 is also supported. This is because the BIKE R3 code requires the 512-bit version
+ * of VPCLMUL, and not the 256-bit version that is available on AMD Zen 3 processors. */
+ if (s2n_pq_is_enabled() && s2n_cpu_supports_bike_r3_vpclmul() && s2n_bike_r3_is_avx512_enabled()) {
+ bike_r3_vpclmul_enabled = true;
+ }
+ return S2N_RESULT_OK;
+}
+S2N_RESULT s2n_try_enable_sikep434r3_asm() {
+ if (s2n_pq_is_enabled() && s2n_cpu_supports_sikep434r3_asm()) {
+ sikep434r3_asm_enabled = true;
+ }
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_try_enable_kyber512r3_opt_avx2_bmi2() {
+ if (s2n_pq_is_enabled() && s2n_cpu_supports_kyber512r3_avx2_bmi2()) {
+ kyber512r3_avx2_bmi2_enabled = true;
+ }
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_bike_r3_x86_64_opt_init()
+{
+ /* s2n_try_enable_bike_r3_opt_vpclmul cascades through all the
+ * optimizations (pclmul, avx2, avx512, vpclmul),
+ * so it is sufficient to call only this function. */
+ RESULT_ENSURE_OK(s2n_try_enable_bike_r3_opt_vpclmul(), S2N_ERR_SAFETY);
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_pq_init() {
+ RESULT_ENSURE_OK(s2n_try_enable_sikep434r3_asm(), S2N_ERR_SAFETY);
+ RESULT_ENSURE_OK(s2n_bike_r3_x86_64_opt_init(), S2N_ERR_SAFETY);
+ RESULT_ENSURE_OK(s2n_try_enable_kyber512r3_opt_avx2_bmi2(), S2N_ERR_SAFETY);
+
return S2N_RESULT_OK;
}
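
The run-time detection added above boils down to CPUID probes with the bit positions defined earlier in this file. A minimal standalone sketch (not part of the patch, illustrative name) using the same GCC/Clang <cpuid.h> helper: leaf 7 / sub-leaf 0 reports AVX2 (EBX bit 5) and BMI2 (EBX bit 8), which together gate the Kyber-512 r3 fast path:

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

/* Leaf 7, sub-leaf 0: structured extended feature flags. */
static bool cpu_has_avx2_bmi2(void) {
    unsigned int eax, ebx, ecx, edx;
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        return false;
    }
    return (ebx & (1u << 5)) /* AVX2 */ && (ebx & (1u << 8)) /* BMI2 */;
}

int main(void) {
    printf("Kyber512r3 AVX2+BMI2 path eligible: %s\n",
           cpu_has_avx2_bmi2() ? "yes" : "no");
    return 0;
}
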
diff --git a/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.h b/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.h
index 7e5d93e991..2af5c4c940 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/s2n_pq.h
@@ -20,8 +20,23 @@
#include "utils/s2n_safety.h"
#include "crypto/s2n_fips.h"
-bool s2n_sikep434r2_asm_is_enabled(void);
+bool s2n_sikep434r3_asm_is_enabled(void);
+S2N_RESULT s2n_disable_sikep434r3_asm(void);
+S2N_RESULT s2n_try_enable_sikep434r3_asm(void);
+
+bool s2n_bike_r3_is_avx2_enabled(void);
+bool s2n_bike_r3_is_avx512_enabled(void);
+bool s2n_bike_r3_is_pclmul_enabled(void);
+bool s2n_bike_r3_is_vpclmul_enabled(void);
+S2N_RESULT s2n_disable_bike_r3_opt_all(void);
+S2N_RESULT s2n_try_enable_bike_r3_opt_pclmul(void);
+S2N_RESULT s2n_try_enable_bike_r3_opt_avx2(void);
+S2N_RESULT s2n_try_enable_bike_r3_opt_avx512(void);
+S2N_RESULT s2n_try_enable_bike_r3_opt_vpclmul(void);
+
+bool s2n_kyber512r3_is_avx2_bmi2_enabled(void);
+S2N_RESULT s2n_try_enable_kyber512r3_opt_avx2_bmi2(void);
+S2N_RESULT s2n_disable_kyber512r3_opt_avx2_bmi2(void);
+
bool s2n_pq_is_enabled(void);
-S2N_RESULT s2n_disable_sikep434r2_asm(void);
-S2N_RESULT s2n_try_enable_sikep434r2_asm(void);
S2N_RESULT s2n_pq_init(void);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/s2n_pq_random.c b/contrib/restricted/aws/s2n/pq-crypto/s2n_pq_random.c
index 845def4a31..275a3e132d 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/s2n_pq_random.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/s2n_pq_random.c
@@ -23,21 +23,21 @@ static S2N_RESULT s2n_get_random_bytes_default(uint8_t *buffer, uint32_t num_byt
static s2n_get_random_bytes_callback s2n_get_random_bytes_cb = s2n_get_random_bytes_default;
S2N_RESULT s2n_get_random_bytes(uint8_t *buffer, uint32_t num_bytes) {
- ENSURE_REF(buffer);
- GUARD_RESULT(s2n_get_random_bytes_cb(buffer, num_bytes));
+ RESULT_ENSURE_REF(buffer);
+ RESULT_GUARD(s2n_get_random_bytes_cb(buffer, num_bytes));
return S2N_RESULT_OK;
}
static S2N_RESULT s2n_get_random_bytes_default(uint8_t *buffer, uint32_t num_bytes) {
struct s2n_blob out = { .data = buffer, .size = num_bytes };
- GUARD_RESULT(s2n_get_private_random_data(&out));
+ RESULT_GUARD(s2n_get_private_random_data(&out));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_set_rand_bytes_callback_for_testing(s2n_get_random_bytes_callback rand_bytes_callback) {
- ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+ RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
s2n_get_random_bytes_cb = rand_bytes_callback;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r1/P503_internal_r1.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r1/P503_internal_r1.h
index f6674fa2bc..64465f19ed 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r1/P503_internal_r1.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r1/P503_internal_r1.h
@@ -150,7 +150,7 @@ void fpdiv2_503(const digit_t* a, digit_t* c);
void fpcorrection503(digit_t* a);
// 503-bit Montgomery reduction, c = a mod p
-void rdc_mont(const digit_t* a, digit_t* c);
+void rdc_mont(const dfelm_t ma, felm_t mc);
// Field multiplication using Montgomery arithmetic, c = a*b*R^-1 mod p503, where R=2^768
void fpmul503_mont(const felm_t a, const felm_t b, felm_t c);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r1/fips202_r1.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r1/fips202_r1.h
index 128a0127bf..983537c2ca 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r1/fips202_r1.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r1/fips202_r1.h
@@ -7,7 +7,7 @@
#define SHAKE128_RATE 168
#define SHAKE256_RATE 136
-void cshake256_simple_absorb(uint64_t *s, uint16_t cstm, const unsigned char *in, unsigned long long inlen);
+void cshake256_simple_absorb(uint64_t s[25], uint16_t cstm, const unsigned char *in, unsigned long long inlen);
void cshake256_simple(unsigned char *output, unsigned long long outlen, uint16_t cstm, const unsigned char *in, unsigned long long inlen);
#endif // FIPS202_R1_H
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sidh_r1.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sidh_r1.c
index 7f3c63fd85..bdf2834121 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sidh_r1.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sidh_r1.c
@@ -63,7 +63,7 @@ int random_mod_order_B(unsigned char* random_digits)
unsigned long long nbytes = NBITS_TO_NBYTES(OBOB_BITS-1);
clear_words((void*)random_digits, MAXWORDS_ORDER);
- GUARD_AS_POSIX(s2n_get_random_bytes(random_digits, nbytes));
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(random_digits, nbytes));
random_digits[nbytes-1] &= MASK_BOB; // Masking last byte
return S2N_SUCCESS;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sike_r1_kem.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sike_r1_kem.c
index 3122eb6539..ee905ca74a 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sike_r1_kem.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r1/sike_r1_kem.c
@@ -16,13 +16,13 @@ int SIKE_P503_r1_crypto_kem_keypair(unsigned char *pk, unsigned char *sk)
{ // SIKE's key generation
// Outputs: secret key sk (SIKE_P503_R1_SECRET_KEY_BYTES = MSG_BYTES + SECRETKEY_B_BYTES + SIKE_P503_R1_PUBLIC_KEY_BYTES bytes)
// public key pk (SIKE_P503_R1_PUBLIC_KEY_BYTES bytes)
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
digit_t _sk[SECRETKEY_B_BYTES/sizeof(digit_t)];
// Generate lower portion of secret key sk <- s||SK
- GUARD_AS_POSIX(s2n_get_random_bytes(sk, MSG_BYTES));
- GUARD(random_mod_order_B((unsigned char*)_sk));
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(sk, MSG_BYTES));
+ POSIX_GUARD(random_mod_order_B((unsigned char*)_sk));
// Generate public key pk
EphemeralKeyGeneration_B(_sk, pk);
@@ -40,7 +40,7 @@ int SIKE_P503_r1_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsi
// Input: public key pk (SIKE_P503_R1_PUBLIC_KEY_BYTES bytes)
// Outputs: shared secret ss (SIKE_P503_R1_SHARED_SECRET_BYTES bytes)
// ciphertext message ct (SIKE_P503_R1_CIPHERTEXT_BYTES = SIKE_P503_R1_PUBLIC_KEY_BYTES + MSG_BYTES bytes)
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
const uint16_t G = 0;
const uint16_t H = 1;
@@ -55,7 +55,7 @@ int SIKE_P503_r1_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsi
unsigned int i;
// Generate ephemeralsk <- G(m||pk) mod oA
- GUARD_AS_POSIX(s2n_get_random_bytes(temp, MSG_BYTES));
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(temp, MSG_BYTES));
memcpy(&temp[MSG_BYTES], pk, SIKE_P503_R1_PUBLIC_KEY_BYTES);
cshake256_simple(ephemeralsk.b, SECRETKEY_A_BYTES, G, temp, SIKE_P503_R1_PUBLIC_KEY_BYTES+MSG_BYTES);
@@ -82,7 +82,7 @@ int SIKE_P503_r1_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, cons
// Input: secret key sk (SIKE_P503_R1_SECRET_KEY_BYTES = MSG_BYTES + SECRETKEY_B_BYTES + SIKE_P503_R1_PUBLIC_KEY_BYTES bytes)
// ciphertext message ct (SIKE_P503_R1_CIPHERTEXT_BYTES = SIKE_P503_R1_PUBLIC_KEY_BYTES + MSG_BYTES bytes)
// Outputs: shared secret ss (SIKE_P503_R1_SHARED_SECRET_BYTES bytes)
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
const uint16_t G = 0;
const uint16_t H = 1;
@@ -117,9 +117,13 @@ int SIKE_P503_r1_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, cons
// Generate shared secret ss <- H(m||ct) or output ss <- H(s||ct)
EphemeralKeyGeneration_A(ephemeralsk_.d, c0_);
- if (memcmp(c0_, ct, SIKE_P503_R1_PUBLIC_KEY_BYTES) != 0) {
- memcpy(temp, sk, MSG_BYTES);
- }
+
+ // Note: This step deviates from the NIST supplied code by using constant time operations.
+ // We only want to copy the data if c0_ and ct are different
+ bool dont_copy = s2n_constant_time_equals(c0_, ct, SIKE_P503_R1_PUBLIC_KEY_BYTES);
+ // The last argument to s2n_constant_time_copy_or_dont is dont and thus prevents the copy when non-zero/true
+ s2n_constant_time_copy_or_dont(temp, sk, MSG_BYTES, dont_copy);
+
memcpy(&temp[MSG_BYTES], ct, SIKE_P503_R1_CIPHERTEXT_BYTES);
cshake256_simple(ss, SIKE_P503_R1_SHARED_SECRET_BYTES, H, temp, SIKE_P503_R1_CIPHERTEXT_BYTES+MSG_BYTES);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434.c
deleted file mode 100644
index 4288a5d186..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: supersingular isogeny parameters and generation of functions for P434
-*********************************************************************************************/
-
-#include "P434_api.h"
-#include "P434_internal.h"
-#include "pq-crypto/s2n_pq.h"
-
-// Encoding of field elements, elements over Z_order, elements over GF(p^2) and elliptic curve points:
-// --------------------------------------------------------------------------------------------------
-// Elements over GF(p) and Z_order are encoded with the least significant octet (and digit) located at the leftmost position (i.e., little endian format).
-// Elements (a+b*i) over GF(p^2), where a and b are defined over GF(p), are encoded as {a, b}, with a in the least significant position.
-// Elliptic curve points P = (x,y) are encoded as {x, y}, with x in the least significant position.
-// Internally, the number of digits used to represent all these elements is obtained by approximating the number of bits to the immediately greater multiple of 32.
-// For example, a 434-bit field element is represented with Ceil(434 / 64) = 7 64-bit digits or Ceil(434 / 32) = 14 32-bit digits.
-
-//
-// Curve isogeny system "SIDHp434". Base curve: Montgomery curve By^2 = Cx^3 + Ax^2 + Cx defined over GF(p434^2), where A=6, B=1, C=1 and p434 = 2^216*3^137-1
-//
-
-
-// The constants p434, p434p1, and p434x2 have been duplicated in
-// sikep434r2_fp_x64_asm.S. If, for any reason, the constants are changed in
-// one file, they should be updated in the other file as well.
-const uint64_t p434[NWORDS64_FIELD] = {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFDC1767AE2FFFFFF,
- 0x7BC65C783158AEA3, 0x6CFC5FD681C52056, 0x0002341F27177344};
-const uint64_t p434p1[NWORDS64_FIELD] = {0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0xFDC1767AE3000000,
- 0x7BC65C783158AEA3, 0x6CFC5FD681C52056, 0x0002341F27177344};
-const uint64_t p434x2[NWORDS64_FIELD] = {0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFB82ECF5C5FFFFFF,
- 0xF78CB8F062B15D47, 0xD9F8BFAD038A40AC, 0x0004683E4E2EE688};
-// Order of Alice's subgroup
-const uint64_t Alice_order[NWORDS64_ORDER] = {0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000001000000};
-// Order of Bob's subgroup
-const uint64_t Bob_order[NWORDS64_ORDER] = {0x58AEA3FDC1767AE3, 0xC520567BC65C7831, 0x1773446CFC5FD681, 0x0000000002341F27};
-// Alice's generator values {XPA0 + XPA1*i, XQA0 + xQA1*i, XRA0 + XRA1*i} in GF(p434^2), expressed in Montgomery representation
-const uint64_t A_gen[6 * NWORDS64_FIELD] = {0x05ADF455C5C345BF, 0x91935C5CC767AC2B, 0xAFE4E879951F0257, 0x70E792DC89FA27B1,
- 0xF797F526BB48C8CD, 0x2181DB6131AF621F, 0x00000A1C08B1ECC4, // XPA0
- 0x74840EB87CDA7788, 0x2971AA0ECF9F9D0B, 0xCB5732BDF41715D5, 0x8CD8E51F7AACFFAA,
- 0xA7F424730D7E419F, 0xD671EB919A179E8C, 0x0000FFA26C5A924A, // XPA1
- 0xFEC6E64588B7273B, 0xD2A626D74CBBF1C6, 0xF8F58F07A78098C7, 0xE23941F470841B03,
- 0x1B63EDA2045538DD, 0x735CFEB0FFD49215, 0x0001C4CB77542876, // XQA0
- 0xADB0F733C17FFDD6, 0x6AFFBD037DA0A050, 0x680EC43DB144E02F, 0x1E2E5D5FF524E374,
- 0xE2DDA115260E2995, 0xA6E4B552E2EDE508, 0x00018ECCDDF4B53E, // XQA1
- 0x01BA4DB518CD6C7D, 0x2CB0251FE3CC0611, 0x259B0C6949A9121B, 0x60E17AC16D2F82AD,
- 0x3AA41F1CE175D92D, 0x413FBE6A9B9BC4F3, 0x00022A81D8D55643, // XRA0
- 0xB8ADBC70FC82E54A, 0xEF9CDDB0D5FADDED, 0x5820C734C80096A0, 0x7799994BAA96E0E4,
- 0x044961599E379AF8, 0xDB2B94FBF09F27E2, 0x0000B87FC716C0C6}; // XRA1
-// Bob's generator values {XPB0, XQB0, XRB0 + XRB1*i} in GF(p434^2), expressed in Montgomery representation
-const uint64_t B_gen[6 * NWORDS64_FIELD] = {0x6E5497556EDD48A3, 0x2A61B501546F1C05, 0xEB919446D049887D, 0x5864A4A69D450C4F,
- 0xB883F276A6490D2B, 0x22CC287022D5F5B9, 0x0001BED4772E551F, // XPB0
- 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
- 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, // XPB1
- 0xFAE2A3F93D8B6B8E, 0x494871F51700FE1C, 0xEF1A94228413C27C, 0x498FF4A4AF60BD62,
- 0xB00AD2A708267E8A, 0xF4328294E017837F, 0x000034080181D8AE, // XQB0
- 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
- 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, // XQB1
- 0x283B34FAFEFDC8E4, 0x9208F44977C3E647, 0x7DEAE962816F4E9A, 0x68A2BA8AA262EC9D,
- 0x8176F112EA43F45B, 0x02106D022634F504, 0x00007E8A50F02E37, // XRB0
- 0xB378B7C1DA22CCB1, 0x6D089C99AD1D9230, 0xEBE15711813E2369, 0x2B35A68239D48A53,
- 0x445F6FD138407C93, 0xBEF93B29A3F6B54B, 0x000173FA910377D3}; // XRB1
-// Montgomery constant Montgomery_R2 = (2^448)^2 mod p434
-const uint64_t Montgomery_R2[NWORDS64_FIELD] = {0x28E55B65DCD69B30, 0xACEC7367768798C2, 0xAB27973F8311688D, 0x175CC6AF8D6C7C0B,
- 0xABCD92BF2DDE347E, 0x69E16A61C7686D9A, 0x000025A89BCDD12A};
-// Value one in Montgomery representation
-const uint64_t Montgomery_one[NWORDS64_FIELD] = {0x000000000000742C, 0x0000000000000000, 0x0000000000000000, 0xB90FF404FC000000,
- 0xD801A4FB559FACD4, 0xE93254545F77410C, 0x0000ECEEA7BD2EDA};
-
-// Fixed parameters for isogeny tree computation
-const unsigned int strat_Alice[MAX_Alice - 1] = {
- 48, 28, 16, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 13, 7, 4, 2, 1, 1, 2, 1, 1, 3, 2, 1, 1,
- 1, 1, 5, 4, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 21, 12, 7, 4, 2, 1, 1, 2, 1, 1, 3, 2, 1, 1, 1, 1, 5, 3, 2, 1, 1, 1, 1, 2, 1, 1, 1, 9, 5, 3, 2, 1, 1,
- 1, 1, 2, 1, 1, 1, 4, 2, 1, 1, 1, 2, 1, 1};
-
-const unsigned int strat_Bob[MAX_Bob - 1] = {
- 66, 33, 17, 9, 5, 3, 2, 1, 1, 1, 1, 2, 1, 1, 1, 4, 2, 1, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 16, 8, 4, 2, 1, 1, 1,
- 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 32, 16, 8, 4, 3, 1, 1, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 8, 4, 2,
- 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 16, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1};
-
-// Setting up macro defines and including GF(p), GF(p^2), curve, isogeny and kex functions
-#define fpcopy fpcopy434
-#define fpzero fpzero434
-#define fpadd fpadd434
-#define fpsub fpsub434
-#define fpneg fpneg434
-#define fpdiv2 fpdiv2_434
-#define fpcorrection fpcorrection434
-#define fpmul_mont fpmul434_mont
-#define fpsqr_mont fpsqr434_mont
-#define fpinv_mont fpinv434_mont
-#define fpinv_chain_mont fpinv434_chain_mont
-#define fp2copy fp2copy434
-#define fp2zero fp2zero434
-#define fp2add fp2add434
-#define fp2sub fp2sub434
-#define fp2neg fp2neg434
-#define fp2div2 fp2div2_434
-#define fp2correction fp2correction434
-#define fp2mul_mont fp2mul434_mont
-#define fp2sqr_mont fp2sqr434_mont
-#define fp2inv_mont fp2inv434_mont
-#define mp_add_asm mp_add434_asm
-#define mp_subaddx2_asm mp_subadd434x2_asm
-#define mp_dblsubx2_asm mp_dblsub434x2_asm
-#define random_mod_order_A oqs_kem_sidh_p434_random_mod_order_A
-#define random_mod_order_B oqs_kem_sidh_p434_random_mod_order_B
-#define EphemeralKeyGeneration_A oqs_kem_sidh_p434_EphemeralKeyGeneration_A
-#define EphemeralKeyGeneration_B oqs_kem_sidh_p434_EphemeralKeyGeneration_B
-#define EphemeralSecretAgreement_A oqs_kem_sidh_p434_EphemeralSecretAgreement_A
-#define EphemeralSecretAgreement_B oqs_kem_sidh_p434_EphemeralSecretAgreement_B
-
-#include "fp.c"
-#include "fpx.c"
-#include "ec_isogeny.c"
-#include "sidh.c"
-#include "sike_r2_kem.c"
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434_api.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434_api.h
deleted file mode 100644
index bdf3eee8cd..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434_api.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: API header file for P434
-*********************************************************************************************/
-
-#ifndef P434_API_H
-#define P434_API_H
-
-#include "P434_internal.h"
-
-/*********************** Key encapsulation mechanism API ***********************/
-
-#define CRYPTO_SECRETKEYBYTES 374 // MSG_BYTES + SECRETKEY_B_BYTES + CRYPTO_PUBLICKEYBYTES bytes
-#define CRYPTO_PUBLICKEYBYTES 330
-#define CRYPTO_BYTES 16
-#define CRYPTO_CIPHERTEXTBYTES 346 // CRYPTO_PUBLICKEYBYTES + MSG_BYTES bytes
-
-// Encoding of keys for KEM-based isogeny system "SIKEp434" (wire format):
-// ----------------------------------------------------------------------
-// Elements over GF(p434) are encoded in 55 octets in little endian format (i.e., the least significant octet is located in the lowest memory address).
-// Elements (a+b*i) over GF(p434^2), where a and b are defined over GF(p434), are encoded as {a, b}, with a in the lowest memory portion.
-//
-// Private keys sk consist of the concatenation of a 16-byte random value, a value in the range [0, 2^217-1] and the public key pk. In the SIKE API,
-// private keys are encoded in 374 octets in little endian format.
-// Public keys pk consist of 3 elements in GF(p434^2). In the SIKE API, pk is encoded in 330 octets.
-// Ciphertexts ct consist of the concatenation of a public key value and a 16-byte value. In the SIKE API, ct is encoded in 330 + 16 = 346 octets.
-// Shared keys ss consist of a value of 16 octets.
-
-/*********************** Ephemeral key exchange API ***********************/
-
-// SECURITY NOTE: SIDH supports ephemeral Diffie-Hellman key exchange. It is NOT secure to use it with static keys.
-// See "On the Security of Supersingular Isogeny Cryptosystems", S.D. Galbraith, C. Petit, B. Shani and Y.B. Ti, in ASIACRYPT 2016, 2016.
-// Extended version available at: http://eprint.iacr.org/2016/859
-
-// Generation of Alice's secret key
-// Outputs random value in [0, 2^216 - 1] to be used as Alice's private key
-int oqs_kem_sidh_p434_random_mod_order_A(unsigned char *random_digits);
-
-// Generation of Bob's secret key
-// Outputs random value in [0, 2^Floor(Log(2,3^137)) - 1] to be used as Bob's private key
-int oqs_kem_sidh_p434_random_mod_order_B(unsigned char *random_digits);
-
-// Alice's ephemeral public key generation
-// Input: a private key PrivateKeyA in the range [0, 2^216 - 1], stored in 27 bytes.
-// Output: the public key PublicKeyA consisting of 3 GF(p434^2) elements encoded in 330 bytes.
-int oqs_kem_sidh_p434_EphemeralKeyGeneration_A(const digit_t *PrivateKeyA, unsigned char *PublicKeyA);
-
-// Bob's ephemeral key-pair generation
-// It produces a private key PrivateKeyB and computes the public key PublicKeyB.
-// The private key is an integer in the range [0, 2^Floor(Log(2,3^137)) - 1], stored in 28 bytes.
-// The public key consists of 3 GF(p434^2) elements encoded in 330 bytes.
-int oqs_kem_sidh_p434_EphemeralKeyGeneration_B(const digit_t *PrivateKeyB, unsigned char *PublicKeyB);
-
-// Alice's ephemeral shared secret computation
-// It produces a shared secret key SharedSecretA using her secret key PrivateKeyA and Bob's public key PublicKeyB
-// Inputs: Alice's PrivateKeyA is an integer in the range [0, 2^216 - 1], stored in 27 bytes.
-// Bob's PublicKeyB consists of 3 GF(p434^2) elements encoded in 330 bytes.
-// Output: a shared secret SharedSecretA that consists of one element in GF(p434^2) encoded in 110 bytes.
-int oqs_kem_sidh_p434_EphemeralSecretAgreement_A(const digit_t *PrivateKeyA, const unsigned char *PublicKeyB, unsigned char *SharedSecretA);
-
-// Bob's ephemeral shared secret computation
-// It produces a shared secret key SharedSecretB using his secret key PrivateKeyB and Alice's public key PublicKeyA
-// Inputs: Bob's PrivateKeyB is an integer in the range [0, 2^Floor(Log(2,3^137)) - 1], stored in 28 bytes.
-// Alice's PublicKeyA consists of 3 GF(p434^2) elements encoded in 330 bytes.
-// Output: a shared secret SharedSecretB that consists of one element in GF(p434^2) encoded in 110 bytes.
-int oqs_kem_sidh_p434_EphemeralSecretAgreement_B(const digit_t *PrivateKeyB, const unsigned char *PublicKeyA, unsigned char *SharedSecretB);
-
-
-#endif
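
As a standalone sanity check (not part of the removed library), the wire sizes quoted in the comments above can be re-derived from the definitions in P434_internal.h; the constants below are copied from the headers shown in this diff, everything else is illustrative.

    /* sketch: re-derive the SIKEp434 wire-format sizes */
    #include <assert.h>
    #include <stdio.h>

    int main(void) {
        const int msg_bytes = 16;                           /* MSG_BYTES */
        const int secretkey_a_bytes = (216 + 7) / 8;        /* OALICE_BITS -> 27 */
        const int secretkey_b_bytes = (218 - 1 + 7) / 8;    /* OBOB_BITS   -> 28 */
        const int fp2_encoded_bytes = 2 * ((434 + 7) / 8);  /* 110 */
        const int publickey_bytes = 3 * fp2_encoded_bytes;  /* 330 = CRYPTO_PUBLICKEYBYTES */

        assert(secretkey_a_bytes == 27 && secretkey_b_bytes == 28);
        assert(msg_bytes + secretkey_b_bytes + publickey_bytes == 374); /* CRYPTO_SECRETKEYBYTES */
        assert(publickey_bytes + msg_bytes == 346);                     /* CRYPTO_CIPHERTEXTBYTES */
        printf("SIKEp434 wire sizes: sk=374 pk=330 ct=346 ss=16\n");
        return 0;
    }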
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434_internal.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434_internal.h
deleted file mode 100644
index 30056d455b..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/P434_internal.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: internal header file for P434
-*********************************************************************************************/
-
-#ifndef P434_INTERNAL_H
-#define P434_INTERNAL_H
-
-#include "config.h"
-
-#if (TARGET == TARGET_AMD64)
-#define NWORDS_FIELD 7 // Number of words of a 434-bit field element
-#define p434_ZERO_WORDS 3 // Number of "0" digits in the least significant part of p434 + 1
-#elif (TARGET == TARGET_x86)
-#define NWORDS_FIELD 14
-#define p434_ZERO_WORDS 6
-#elif (TARGET == TARGET_ARM)
-#define NWORDS_FIELD 14
-#define p434_ZERO_WORDS 6
-#elif (TARGET == TARGET_ARM64)
-#define NWORDS_FIELD 7
-#define p434_ZERO_WORDS 3
-#endif
-
-// Basic constants
-
-#define NBITS_FIELD 434
-#define MAXBITS_FIELD 448
-#define MAXWORDS_FIELD ((MAXBITS_FIELD + RADIX - 1) / RADIX) // Max. number of words to represent field elements
-#define NWORDS64_FIELD ((NBITS_FIELD + 63) / 64) // Number of 64-bit words of a 434-bit field element
-#define NBITS_ORDER 256
-#define NWORDS_ORDER ((NBITS_ORDER + RADIX - 1) / RADIX) // Number of words of oA and oB, where oA and oB are the subgroup orders of Alice and Bob, resp.
-#define NWORDS64_ORDER ((NBITS_ORDER + 63) / 64) // Number of 64-bit words of a 256-bit element
-#define MAXBITS_ORDER NBITS_ORDER
-#define ALICE 0
-#define BOB 1
-#define OALICE_BITS 216
-#define OBOB_BITS 218
-#define OBOB_EXPON 137
-#define MASK_ALICE 0xFF
-#define MASK_BOB 0x01
-#define PRIME p434
-#define PARAM_A 6
-#define PARAM_C 1
-// Fixed parameters for isogeny tree computation
-#define MAX_INT_POINTS_ALICE 7
-#define MAX_INT_POINTS_BOB 8
-#define MAX_Alice 108
-#define MAX_Bob 137
-#define MSG_BYTES 16
-#define SECRETKEY_A_BYTES ((OALICE_BITS + 7) / 8)
-#define SECRETKEY_B_BYTES ((OBOB_BITS - 1 + 7) / 8)
-#define FP2_ENCODED_BYTES 2 * ((NBITS_FIELD + 7) / 8)
-
-// SIDH's basic element definitions and point representations
-
-typedef digit_t felm_t[NWORDS_FIELD]; // Datatype for representing 434-bit field elements (448-bit max.)
-typedef digit_t dfelm_t[2 * NWORDS_FIELD]; // Datatype for representing double-precision 2x434-bit field elements (2x448-bit max.)
-typedef struct felm_s {
- felm_t e[2];
-} f2elm_t; // Datatype for representing quadratic extension field elements GF(p434^2)
-
-typedef struct {
- f2elm_t X;
- f2elm_t Z;
-} point_proj; // Point representation in projective XZ Montgomery coordinates.
-typedef point_proj point_proj_t[1];
-
-/**************** Function prototypes ****************/
-/************* Multiprecision functions **************/
-
-// Copy wordsize digits, c = a, where lng(a) = nwords
-void copy_words(const digit_t *a, digit_t *c, const unsigned int nwords);
-
-// Multiprecision addition, c = a+b, where lng(a) = lng(b) = nwords. Returns the carry bit
-unsigned int mp_add(const digit_t *a, const digit_t *b, digit_t *c, const unsigned int nwords);
-
-// 434-bit multiprecision addition, c = a+b
-void mp_add434_asm(const digit_t *a, const digit_t *b, digit_t *c);
-
-// Multiprecision subtraction, c = a-b, where lng(a) = lng(b) = nwords. Returns the borrow bit
-unsigned int mp_sub(const digit_t *a, const digit_t *b, digit_t *c, const unsigned int nwords);
-
-// 2x434-bit multiprecision subtraction followed by addition with p434*2^448, c = a-b+(p434*2^448) if a-b < 0, otherwise c=a-b
-void mp_subaddx2_asm(const digit_t *a, const digit_t *b, digit_t *c);
-void mp_subadd434x2_asm(const digit_t *a, const digit_t *b, digit_t *c);
-
-// Double 2x434-bit multiprecision subtraction, c = c-a-b, where c > a and c > b
-void mp_dblsub434x2_asm(const digit_t *a, const digit_t *b, digit_t *c);
-
-// Multiprecision right shift by one
-void mp_shiftr1(digit_t *x, const unsigned int nwords);
-
-// Digit multiplication, digit * digit -> 2-digit result
-void digit_x_digit(const digit_t a, const digit_t b, digit_t *c);
-
-// Multiprecision comba multiply, c = a*b, where lng(a) = lng(b) = nwords.
-void mp_mul(const digit_t *a, const digit_t *b, digit_t *c, const unsigned int nwords);
-
-/************ Field arithmetic functions *************/
-
-// Copy of a field element, c = a
-void fpcopy434(const digit_t *a, digit_t *c);
-
-// Zeroing a field element, a = 0
-void fpzero434(digit_t *a);
-
-// Modular addition, c = a+b mod p434
-extern void fpadd434(const digit_t *a, const digit_t *b, digit_t *c);
-extern void fpadd434_asm(const digit_t *a, const digit_t *b, digit_t *c);
-
-// Modular subtraction, c = a-b mod p434
-extern void fpsub434(const digit_t *a, const digit_t *b, digit_t *c);
-extern void fpsub434_asm(const digit_t *a, const digit_t *b, digit_t *c);
-
-// Modular negation, a = -a mod p434
-extern void fpneg434(digit_t *a);
-
-// Modular division by two, c = a/2 mod p434.
-void fpdiv2_434(const digit_t *a, digit_t *c);
-
-// Modular correction to reduce field element a in [0, 2*p434-1] to [0, p434-1].
-void fpcorrection434(digit_t *a);
-
-// 434-bit Montgomery reduction, c = a mod p
-void rdc_mont(const digit_t *a, digit_t *c);
-
-// Field multiplication using Montgomery arithmetic, c = a*b*R^-1 mod p434, where R=2^448
-void fpmul434_mont(const digit_t *a, const digit_t *b, digit_t *c);
-void mul434_asm(const digit_t *a, const digit_t *b, digit_t *c);
-void rdc434_asm(const digit_t *ma, digit_t *mc);
-
-// Field squaring using Montgomery arithmetic, c = a^2*R^-1 mod p434, where R=2^448
-void fpsqr434_mont(const digit_t *ma, digit_t *mc);
-
-// Conversion to Montgomery representation
-void to_mont(const digit_t *a, digit_t *mc);
-
-// Conversion from Montgomery representation to standard representation
-void from_mont(const digit_t *ma, digit_t *c);
-
-// Field inversion, a = a^-1 in GF(p434)
-void fpinv434_mont(digit_t *a);
-
-// Chain to compute (p434-3)/4 using Montgomery arithmetic
-void fpinv434_chain_mont(digit_t *a);
-
-/************ GF(p^2) arithmetic functions *************/
-
-// Copy of a GF(p434^2) element, c = a
-void fp2copy434(const f2elm_t *a, f2elm_t *c);
-
-// Zeroing a GF(p434^2) element, a = 0
-void fp2zero434(f2elm_t *a);
-
-// GF(p434^2) negation, a = -a in GF(p434^2)
-void fp2neg434(f2elm_t *a);
-
-// GF(p434^2) addition, c = a+b in GF(p434^2)
-void fp2add434(const f2elm_t *a, const f2elm_t *b, f2elm_t *c);
-
-// GF(p434^2) subtraction, c = a-b in GF(p434^2)
-extern void fp2sub434(const f2elm_t *a, const f2elm_t *b, f2elm_t *c);
-
-// GF(p434^2) division by two, c = a/2 in GF(p434^2)
-void fp2div2_434(const f2elm_t *a, f2elm_t *c);
-
-// Modular correction, a = a in GF(p434^2)
-void fp2correction434(f2elm_t *a);
-
-// GF(p434^2) squaring using Montgomery arithmetic, c = a^2 in GF(p434^2)
-void fp2sqr434_mont(const f2elm_t *a, f2elm_t *c);
-
-// GF(p434^2) multiplication using Montgomery arithmetic, c = a*b in GF(p434^2)
-void fp2mul434_mont(const f2elm_t *a, const f2elm_t *b, f2elm_t *c);
-
-// Conversion of a GF(p434^2) element to Montgomery representation
-void to_fp2mont(const f2elm_t *a, f2elm_t *mc);
-
-// Conversion of a GF(p434^2) element from Montgomery representation to standard representation
-void from_fp2mont(const f2elm_t *ma, f2elm_t *c);
-
-// GF(p434^2) inversion using Montgomery arithmetic, a = (a0-i*a1)/(a0^2+a1^2)
-void fp2inv434_mont(f2elm_t *a);
-
-/************ Elliptic curve and isogeny functions *************/
-
-// Computes the j-invariant of a Montgomery curve with projective constant.
-void j_inv(const f2elm_t *A, const f2elm_t *C, f2elm_t *jinv);
-
-// Simultaneous doubling and differential addition.
-void xDBLADD(point_proj_t P, point_proj_t Q, const f2elm_t *xPQ, const f2elm_t *A24);
-
-// Doubling of a Montgomery point in projective coordinates (X:Z).
-void xDBL(const point_proj_t P, point_proj_t Q, const f2elm_t *A24plus, const f2elm_t *C24);
-
-// Computes [2^e](X:Z) on Montgomery curve with projective constant via e repeated doublings.
-void xDBLe(const point_proj_t P, point_proj_t Q, const f2elm_t *A24plus, const f2elm_t *C24, const int e);
-
-// Computes the corresponding 4-isogeny of a projective Montgomery point (X4:Z4) of order 4.
-void get_4_isog(const point_proj_t P, f2elm_t *A24plus, f2elm_t *C24, f2elm_t *coeff);
-
-// Evaluates the isogeny at the point (X:Z) in the domain of the isogeny.
-void eval_4_isog(point_proj_t P, f2elm_t *coeff);
-
-// Tripling of a Montgomery point in projective coordinates (X:Z).
-void xTPL(const point_proj_t P, point_proj_t Q, const f2elm_t *A24minus, const f2elm_t *A24plus);
-
-// Computes [3^e](X:Z) on Montgomery curve with projective constant via e repeated triplings.
-void xTPLe(const point_proj_t P, point_proj_t Q, const f2elm_t *A24minus, const f2elm_t *A24plus, const int e);
-
-// Computes the corresponding 3-isogeny of a projective Montgomery point (X3:Z3) of order 3.
-void get_3_isog(const point_proj_t P, f2elm_t *A24minus, f2elm_t *A24plus, f2elm_t *coeff);
-
-// Computes the 3-isogeny R=phi(X:Z), given projective point (X3:Z3) of order 3 on a Montgomery curve and a point P with coefficients given in coeff.
-void eval_3_isog(point_proj_t Q, const f2elm_t *coeff);
-
-// 3-way simultaneous inversion
-void inv_3_way(f2elm_t *z1, f2elm_t *z2, f2elm_t *z3);
-
-// Given the x-coordinates of P, Q, and R, returns the value A corresponding to the Montgomery curve E_A: y^2=x^3+A*x^2+x such that R=Q-P on E_A.
-void get_A(const f2elm_t *xP, const f2elm_t *xQ, const f2elm_t *xR, f2elm_t *A);
-
-#endif
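
For reference, the word-count macros above reduce to small constants once RADIX is fixed. A minimal sketch for a 64-bit target (the TARGET_AMD64 case; an assumption of this example) follows, with the bit widths copied from the deleted header.

    /* sketch: derived word counts for RADIX = 64 */
    #include <stdio.h>

    #define RADIX 64
    #define NBITS_FIELD 434
    #define MAXBITS_FIELD 448
    #define NBITS_ORDER 256

    int main(void) {
        unsigned nwords_field   = (NBITS_FIELD + RADIX - 1) / RADIX;   /* 7, matches NWORDS_FIELD */
        unsigned maxwords_field = (MAXBITS_FIELD + RADIX - 1) / RADIX; /* 7, MAXWORDS_FIELD */
        unsigned nwords_order   = (NBITS_ORDER + RADIX - 1) / RADIX;   /* 4, NWORDS_ORDER */
        printf("NWORDS_FIELD=%u MAXWORDS_FIELD=%u NWORDS_ORDER=%u\n",
               nwords_field, maxwords_field, nwords_order);
        /* An f2elm_t therefore occupies 2 * 7 * 8 = 112 bytes in memory, while its
           wire encoding (FP2_ENCODED_BYTES) is only 2 * 55 = 110 bytes. */
        return 0;
    }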
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/config.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/config.h
deleted file mode 100644
index 6199e5a708..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/config.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: configuration file and platform-dependent macros
-*********************************************************************************************/
-
-#ifndef SIKE_CONFIG_H
-#define SIKE_CONFIG_H
-
-#include <stdint.h>
-#include <stdbool.h>
-#include <stddef.h>
-
-// Definition of operating system
-
-#define OS_WIN 1
-#define OS_LINUX 2
-
-#if defined(_WIN32) // Microsoft Windows OS
-#define OS_TARGET OS_WIN
-#else
-#define OS_TARGET OS_LINUX // default to Linux
-#endif
-
-// Definition of compiler (removed in OQS)
-
-#define COMPILER_GCC 1
-#define COMPILER_CLANG 2
-
-#if defined(__GNUC__) // GNU GCC compiler
-#define COMPILER COMPILER_GCC
-#elif defined(__clang__) // Clang compiler
-#define COMPILER COMPILER_CLANG
-#else
-#error -- "Unsupported COMPILER"
-#endif
-
-// Definition of the targeted architecture and basic data types
-#define TARGET_AMD64 1
-#define TARGET_x86 2
-#define TARGET_ARM 3
-#define TARGET_ARM64 4
-
-#if defined(__x86_64__)
-#define TARGET TARGET_AMD64
-#define RADIX 64
-#define LOG2RADIX 6
-typedef uint64_t digit_t; // Unsigned 64-bit digit
-typedef uint32_t hdigit_t; // Unsigned 32-bit digit
-#elif defined(__i386__)
-#define TARGET TARGET_x86
-#define RADIX 32
-#define LOG2RADIX 5
-typedef uint32_t digit_t; // Unsigned 32-bit digit
-typedef uint16_t hdigit_t; // Unsigned 16-bit digit
-#elif defined(__arm__)
-#define TARGET TARGET_ARM
-#define RADIX 32
-#define LOG2RADIX 5
-typedef uint32_t digit_t; // Unsigned 32-bit digit
-typedef uint16_t hdigit_t; // Unsigned 16-bit digit
-#elif defined(__aarch64__)
-#define TARGET TARGET_ARM64
-#define RADIX 64
-#define LOG2RADIX 6
-typedef uint64_t digit_t; // Unsigned 64-bit digit
-typedef uint32_t hdigit_t; // Unsigned 32-bit digit
-#else
-#error -- "Unsupported ARCHITECTURE"
-#endif
-
-#define RADIX64 64
-
-// Extended datatype support
-#if !defined(S2N_SIKEP434R2_ASM)
-typedef uint64_t uint128_t[2];
-#elif (TARGET == TARGET_AMD64 && OS_TARGET == OS_LINUX)
-typedef unsigned uint128_t __attribute__((mode(TI)));
-#elif (TARGET == TARGET_ARM64 && OS_TARGET == OS_LINUX)
-typedef unsigned uint128_t __attribute__((mode(TI)));
-#elif (TARGET == TARGET_AMD64 && OS_TARGET == OS_WIN)
-typedef uint64_t uint128_t[2];
-#endif
-
-// Macro definitions
-
-#define NBITS_TO_NBYTES(nbits) (((nbits) + 7) / 8) // Conversion macro from number of bits to number of bytes
-#define NBITS_TO_NWORDS(nbits) (((nbits) + (sizeof(digit_t) * 8) - 1) / (sizeof(digit_t) * 8)) // Conversion macro from number of bits to number of computer words
-#define NBYTES_TO_NWORDS(nbytes) (((nbytes) + sizeof(digit_t) - 1) / sizeof(digit_t)) // Conversion macro from number of bytes to number of computer words
-
-// Macro to avoid compiler warnings when detecting unreferenced parameters
-#define UNREFERENCED_PARAMETER(PAR) ((void) (PAR))
-
-/********************** Constant-time unsigned comparisons ***********************/
-
-// The following functions return 1 (TRUE) if condition is true, 0 (FALSE) otherwise
-
-unsigned int is_digit_nonzero_ct(digit_t x) { // Is x != 0?
- return (unsigned int) ((x | (0 - x)) >> (RADIX - 1));
-}
-
-unsigned int is_digit_zero_ct(digit_t x) { // Is x = 0?
- return (unsigned int) (1 ^ is_digit_nonzero_ct(x));
-}
-
-unsigned int is_digit_lessthan_ct(digit_t x, digit_t y) { // Is x < y?
- return (unsigned int) ((x ^ ((x ^ y) | ((x - y) ^ y))) >> (RADIX - 1));
-}
-
-/********************** Macros for platform-dependent operations **********************/
-
-#if (!defined(S2N_SIKEP434R2_ASM)) || (TARGET == TARGET_ARM)
-
-// Digit multiplication
-#define MUL(multiplier, multiplicand, hi, lo) \
- digit_x_digit((multiplier), (multiplicand), &(lo));
-
-// Digit addition with carry
-#define ADDC(carryIn, addend1, addend2, carryOut, sumOut) \
- { \
- digit_t tempReg = (addend1) + (digit_t)(carryIn); \
- (sumOut) = (addend2) + tempReg; \
- (carryOut) = (is_digit_lessthan_ct(tempReg, (digit_t)(carryIn)) | is_digit_lessthan_ct((sumOut), tempReg)); \
- }
-
-// Digit subtraction with borrow
-#define SUBC(borrowIn, minuend, subtrahend, borrowOut, differenceOut) \
- { \
- digit_t tempReg = (minuend) - (subtrahend); \
- unsigned int borrowReg = (is_digit_lessthan_ct((minuend), (subtrahend)) | ((borrowIn) &is_digit_zero_ct(tempReg))); \
- (differenceOut) = tempReg - (digit_t)(borrowIn); \
- (borrowOut) = borrowReg; \
- }
-
-// Shift right with flexible datatype
-#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \
- (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << (DigitSize - (shift)));
-
-// Shift left with flexible datatype
-#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \
- (shiftOut) = ((highIn) << (shift)) ^ ((lowIn) >> (DigitSize - (shift)));
-
-#elif (TARGET == TARGET_AMD64 && OS_TARGET == OS_WIN)
-
-// Digit multiplication
-#define MUL(multiplier, multiplicand, hi, lo) \
- (lo) = _umul128((multiplier), (multiplicand), (hi));
-
-// Digit addition with carry
-#define ADDC(carryIn, addend1, addend2, carryOut, sumOut) \
- (carryOut) = _addcarry_u64((carryIn), (addend1), (addend2), &(sumOut));
-
-// Digit subtraction with borrow
-#define SUBC(borrowIn, minuend, subtrahend, borrowOut, differenceOut) \
- (borrowOut) = _subborrow_u64((borrowIn), (minuend), (subtrahend), &(differenceOut));
-
-// Digit shift right
-#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \
- (shiftOut) = __shiftright128((lowIn), (highIn), (shift));
-
-// Digit shift left
-#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \
- (shiftOut) = __shiftleft128((lowIn), (highIn), (shift));
-
-// 64x64-bit multiplication
-#define MUL128(multiplier, multiplicand, product) \
- (product)[0] = _umul128((multiplier), (multiplicand), &(product)[1]);
-
-// 128-bit addition with output carry
-#define ADC128(addend1, addend2, carry, addition) \
- (carry) = _addcarry_u64(0, (addend1)[0], (addend2)[0], &(addition)[0]); \
- (carry) = _addcarry_u64((carry), (addend1)[1], (addend2)[1], &(addition)[1]);
-
-#define MULADD128(multiplier, multiplicand, addend, carry, result) \
- ; \
- { \
- uint128_t product; \
- MUL128(multiplier, multiplicand, product); \
- ADC128(addend, product, carry, result); \
- }
-
-#elif ((TARGET == TARGET_AMD64 || TARGET == TARGET_ARM64) && OS_TARGET == OS_LINUX)
-
-// Digit multiplication
-#define MUL(multiplier, multiplicand, hi, lo) \
- { \
- uint128_t tempReg = (uint128_t)(multiplier) * (uint128_t)(multiplicand); \
- *(hi) = (digit_t)(tempReg >> RADIX); \
- (lo) = (digit_t) tempReg; \
- }
-
-// Digit addition with carry
-#define ADDC(carryIn, addend1, addend2, carryOut, sumOut) \
- { \
- uint128_t tempReg = (uint128_t)(addend1) + (uint128_t)(addend2) + (uint128_t)(carryIn); \
- (carryOut) = (digit_t)(tempReg >> RADIX); \
- (sumOut) = (digit_t) tempReg; \
- }
-
-// Digit subtraction with borrow
-#define SUBC(borrowIn, minuend, subtrahend, borrowOut, differenceOut) \
- { \
- uint128_t tempReg = (uint128_t)(minuend) - (uint128_t)(subtrahend) - (uint128_t)(borrowIn); \
- (borrowOut) = (digit_t)(tempReg >> (sizeof(uint128_t) * 8 - 1)); \
- (differenceOut) = (digit_t) tempReg; \
- }
-
-// Digit shift right
-#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \
- (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << (RADIX - (shift)));
-
-// Digit shift left
-#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \
- (shiftOut) = ((highIn) << (shift)) ^ ((lowIn) >> (RADIX - (shift)));
-
-#endif
-
-#endif
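
The constant-time helpers removed above avoid data-dependent branches; the less-than test in particular relies on a bit trick. The standalone sketch below exercises that exact formula against the plain `<` operator, using 8-bit digits (an assumption made here only so the check can be exhaustive).

    /* sketch: exhaustive check of the constant-time less-than formula */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RADIX 8
    typedef uint8_t digit_t;

    static unsigned int is_digit_lessthan_ct(digit_t x, digit_t y) { // Is x < y?
        return (unsigned int) ((digit_t)(x ^ ((x ^ y) | ((digit_t)(x - y) ^ y))) >> (RADIX - 1));
    }

    int main(void) {
        for (unsigned x = 0; x < 256; x++) {
            for (unsigned y = 0; y < 256; y++) {
                assert(is_digit_lessthan_ct((digit_t) x, (digit_t) y) == (x < y));
            }
        }
        printf("constant-time less-than agrees with < for all 8-bit digit pairs\n");
        return 0;
    }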
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/ec_isogeny.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/ec_isogeny.c
deleted file mode 100644
index 8a3f85e92b..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/ec_isogeny.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: elliptic curve and isogeny functions
-*********************************************************************************************/
-
-void xDBL(const point_proj_t P, point_proj_t Q, const f2elm_t *A24plus, const f2elm_t *C24) { // Doubling of a Montgomery point in projective coordinates (X:Z).
- // Input: projective Montgomery x-coordinates P = (X1:Z1), where x1=X1/Z1 and Montgomery curve constants A+2C and 4C.
- // Output: projective Montgomery x-coordinates Q = 2*P = (X2:Z2).
- f2elm_t _t0, _t1;
- f2elm_t *t0=&_t0, *t1=&_t1;
-
- fp2sub(&P->X, &P->Z, t0); // t0 = X1-Z1
- fp2add(&P->X, &P->Z, t1); // t1 = X1+Z1
- fp2sqr_mont(t0, t0); // t0 = (X1-Z1)^2
- fp2sqr_mont(t1, t1); // t1 = (X1+Z1)^2
- fp2mul_mont(C24, t0, &Q->Z); // Z2 = C24*(X1-Z1)^2
- fp2mul_mont(t1, &Q->Z, &Q->X); // X2 = C24*(X1-Z1)^2*(X1+Z1)^2
- fp2sub(t1, t0, t1); // t1 = (X1+Z1)^2-(X1-Z1)^2
- fp2mul_mont(A24plus, t1, t0); // t0 = A24plus*[(X1+Z1)^2-(X1-Z1)^2]
- fp2add(&Q->Z, t0, &Q->Z); // Z2 = A24plus*[(X1+Z1)^2-(X1-Z1)^2] + C24*(X1-Z1)^2
- fp2mul_mont(&Q->Z, t1, &Q->Z); // Z2 = [A24plus*[(X1+Z1)^2-(X1-Z1)^2] + C24*(X1-Z1)^2]*[(X1+Z1)^2-(X1-Z1)^2]
-}
-
-void xDBLe(const point_proj_t P, point_proj_t Q, const f2elm_t *A24plus, const f2elm_t *C24, const int e) { // Computes [2^e](X:Z) on Montgomery curve with projective constant via e repeated doublings.
- // Input: projective Montgomery x-coordinates P = (XP:ZP), such that xP=XP/ZP and Montgomery curve constants A+2C and 4C.
- // Output: projective Montgomery x-coordinates Q <- (2^e)*P.
- int i;
-
- copy_words((const digit_t *) P, (digit_t *) Q, 2 * 2 * NWORDS_FIELD);
-
- for (i = 0; i < e; i++) {
- xDBL(Q, Q, A24plus, C24);
- }
-}
-
-void get_4_isog(const point_proj_t P, f2elm_t *A24plus, f2elm_t *C24, f2elm_t *coeff) { // Computes the corresponding 4-isogeny of a projective Montgomery point (X4:Z4) of order 4.
- // Input: projective point of order four P = (X4:Z4).
- // Output: the 4-isogenous Montgomery curve with projective coefficients A+2C/4C and the 3 coefficients
- // that are used to evaluate the isogeny at a point in eval_4_isog().
-
- fp2sub(&P->X, &P->Z, &coeff[1]); // coeff[1] = X4-Z4
- fp2add(&P->X, &P->Z, &coeff[2]); // coeff[2] = X4+Z4
- fp2sqr_mont(&P->Z, &coeff[0]); // coeff[0] = Z4^2
- fp2add(&coeff[0], &coeff[0], &coeff[0]); // coeff[0] = 2*Z4^2
- fp2sqr_mont(&coeff[0], C24); // C24 = 4*Z4^4
- fp2add(&coeff[0], &coeff[0], &coeff[0]); // coeff[0] = 4*Z4^2
- fp2sqr_mont(&P->X, A24plus); // A24plus = X4^2
- fp2add(A24plus, A24plus, A24plus); // A24plus = 2*X4^2
- fp2sqr_mont(A24plus, A24plus); // A24plus = 4*X4^4
-}
-
-void eval_4_isog(point_proj_t P, f2elm_t *coeff) { // Evaluates the isogeny at the point (X:Z) in the domain of the isogeny, given a 4-isogeny phi defined
- // by the 3 coefficients in coeff (computed in the function get_4_isog()).
- // Inputs: the coefficients defining the isogeny, and the projective point P = (X:Z).
- // Output: the projective point P = phi(P) = (X:Z) in the codomain.
- f2elm_t _t0, _t1;
- f2elm_t *t0=&_t0, *t1=&_t1;
-
- fp2add(&P->X, &P->Z, t0); // t0 = X+Z
- fp2sub(&P->X, &P->Z, t1); // t1 = X-Z
- fp2mul_mont(t0, &coeff[1], &P->X); // X = (X+Z)*coeff[1]
- fp2mul_mont(t1, &coeff[2], &P->Z); // Z = (X-Z)*coeff[2]
- fp2mul_mont(t0, t1, t0); // t0 = (X+Z)*(X-Z)
- fp2mul_mont(t0, &coeff[0], t0); // t0 = coeff[0]*(X+Z)*(X-Z)
- fp2add(&P->X, &P->Z, t1); // t1 = (X-Z)*coeff[2] + (X+Z)*coeff[1]
- fp2sub(&P->X, &P->Z, &P->Z); // Z = (X-Z)*coeff[2] - (X+Z)*coeff[1]
- fp2sqr_mont(t1, t1); // t1 = [(X-Z)*coeff[2] + (X+Z)*coeff[1]]^2
- fp2sqr_mont(&P->Z, &P->Z); // Z = [(X-Z)*coeff[2] - (X+Z)*coeff[1]]^2
- fp2add(t1, t0, &P->X); // X = coeff[0]*(X+Z)*(X-Z) + [(X-Z)*coeff[2] + (X+Z)*coeff[1]]^2
- fp2sub(&P->Z, t0, t0); // t0 = [(X-Z)*coeff[2] - (X+Z)*coeff[1]]^2 - coeff[0]*(X+Z)*(X-Z)
- fp2mul_mont(&P->X, t1, &P->X); // Xfinal
- fp2mul_mont(&P->Z, t0, &P->Z); // Zfinal
-}
-
-void xTPL(const point_proj_t P, point_proj_t Q, const f2elm_t *A24minus, const f2elm_t *A24plus) { // Tripling of a Montgomery point in projective coordinates (X:Z).
- // Input: projective Montgomery x-coordinates P = (X:Z), where x=X/Z and Montgomery curve constants A24plus = A+2C and A24minus = A-2C.
- // Output: projective Montgomery x-coordinates Q = 3*P = (X3:Z3).
- f2elm_t _t0, _t1, _t2, _t3, _t4, _t5, _t6;
- f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2, *t3=&_t3, *t4=&_t4, *t5=&_t5, *t6=&_t6;
-
- fp2sub(&P->X, &P->Z, t0); // t0 = X-Z
- fp2sqr_mont(t0, t2); // t2 = (X-Z)^2
- fp2add(&P->X, &P->Z, t1); // t1 = X+Z
- fp2sqr_mont(t1, t3); // t3 = (X+Z)^2
- fp2add(t0, t1, t4); // t4 = 2*X
- fp2sub(t1, t0, t0); // t0 = 2*Z
- fp2sqr_mont(t4, t1); // t1 = 4*X^2
- fp2sub(t1, t3, t1); // t1 = 4*X^2 - (X+Z)^2
- fp2sub(t1, t2, t1); // t1 = 4*X^2 - (X+Z)^2 - (X-Z)^2
- fp2mul_mont(t3, A24plus, t5); // t5 = A24plus*(X+Z)^2
- fp2mul_mont(t3, t5, t3); // t3 = A24plus*(X+Z)^3
- fp2mul_mont(A24minus, t2, t6); // t6 = A24minus*(X-Z)^2
- fp2mul_mont(t2, t6, t2); // t2 = A24minus*(X-Z)^3
- fp2sub(t2, t3, t3); // t3 = A24minus*(X-Z)^3 - A24plus*(X+Z)^3
- fp2sub(t5, t6, t2); // t2 = A24plus*(X+Z)^2 - A24minus*(X-Z)^2
- fp2mul_mont(t1, t2, t1); // t1 = [4*X^2 - (X+Z)^2 - (X-Z)^2]*[A24plus*(X+Z)^2 - A24minus*(X-Z)^2]
- fp2add(t3, t1, t2); // t2 = [4*X^2 - (X+Z)^2 - (X-Z)^2]*[A24plus*(X+Z)^2 - A24minus*(X-Z)^2] + A24minus*(X-Z)^3 - A24plus*(X+Z)^3
- fp2sqr_mont(t2, t2); // t2 = t2^2
- fp2mul_mont(t4, t2, &Q->X); // X3 = 2*X*t2
- fp2sub(t3, t1, t1); // t1 = A24minus*(X-Z)^3 - A24plus*(X+Z)^3 - [4*X^2 - (X+Z)^2 - (X-Z)^2]*[A24plus*(X+Z)^2 - A24minus*(X-Z)^2]
- fp2sqr_mont(t1, t1); // t1 = t1^2
- fp2mul_mont(t0, t1, &Q->Z); // Z3 = 2*Z*t1
-}
-
-void xTPLe(const point_proj_t P, point_proj_t Q, const f2elm_t *A24minus, const f2elm_t *A24plus, const int e) { // Computes [3^e](X:Z) on Montgomery curve with projective constant via e repeated triplings.
- // Input: projective Montgomery x-coordinates P = (XP:ZP), such that xP=XP/ZP and Montgomery curve constants A24plus = A+2C and A24minus = A-2C.
- // Output: projective Montgomery x-coordinates Q <- (3^e)*P.
- int i;
-
- copy_words((const digit_t *) P, (digit_t *) Q, 2 * 2 * NWORDS_FIELD);
-
- for (i = 0; i < e; i++) {
- xTPL(Q, Q, A24minus, A24plus);
- }
-}
-
-void get_3_isog(const point_proj_t P, f2elm_t *A24minus, f2elm_t *A24plus, f2elm_t *coeff) { // Computes the corresponding 3-isogeny of a projective Montgomery point (X3:Z3) of order 3.
- // Input: projective point of order three P = (X3:Z3).
- // Output: the 3-isogenous Montgomery curve with projective coefficient A/C.
- f2elm_t _t0, _t1, _t2, _t3, _t4;
- f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2, *t3=&_t3, *t4=&_t4;
-
- fp2sub(&P->X, &P->Z, &coeff[0]); // coeff0 = X-Z
- fp2sqr_mont(&coeff[0], t0); // t0 = (X-Z)^2
- fp2add(&P->X, &P->Z, &coeff[1]); // coeff1 = X+Z
- fp2sqr_mont(&coeff[1], t1); // t1 = (X+Z)^2
- fp2add(t0, t1, t2); // t2 = (X+Z)^2 + (X-Z)^2
- fp2add(&coeff[0], &coeff[1], t3); // t3 = 2*X
- fp2sqr_mont(t3, t3); // t3 = 4*X^2
- fp2sub(t3, t2, t3); // t3 = 4*X^2 - (X+Z)^2 - (X-Z)^2
- fp2add(t1, t3, t2); // t2 = 4*X^2 - (X-Z)^2
- fp2add(t3, t0, t3); // t3 = 4*X^2 - (X+Z)^2
- fp2add(t0, t3, t4); // t4 = 4*X^2 - (X+Z)^2 + (X-Z)^2
- fp2add(t4, t4, t4); // t4 = 2(4*X^2 - (X+Z)^2 + (X-Z)^2)
- fp2add(t1, t4, t4); // t4 = 8*X^2 - (X+Z)^2 + 2*(X-Z)^2
- fp2mul_mont(t2, t4, A24minus); // A24minus = [4*X^2 - (X-Z)^2]*[8*X^2 - (X+Z)^2 + 2*(X-Z)^2]
- fp2add(t1, t2, t4); // t4 = 4*X^2 + (X+Z)^2 - (X-Z)^2
- fp2add(t4, t4, t4); // t4 = 2(4*X^2 + (X+Z)^2 - (X-Z)^2)
- fp2add(t0, t4, t4); // t4 = 8*X^2 + 2*(X+Z)^2 - (X-Z)^2
- fp2mul_mont(t3, t4, A24plus); // A24plus = [4*X^2 - (X+Z)^2]*[8*X^2 + 2*(X+Z)^2 - (X-Z)^2]
-}
-
-void eval_3_isog(point_proj_t Q, const f2elm_t *coeff) { // Computes the 3-isogeny R=phi(X:Z), given projective point (X3:Z3) of order 3 on a Montgomery curve and
- // a point P with 2 coefficients in coeff (computed in the function get_3_isog()).
- // Inputs: projective points P = (X3:Z3) and Q = (X:Z).
- // Output: the projective point Q <- phi(Q) = (X3:Z3).
- f2elm_t _t0, _t1, _t2;
- f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2;
-
- fp2add(&Q->X, &Q->Z, t0); // t0 = X+Z
- fp2sub(&Q->X, &Q->Z, t1); // t1 = X-Z
- fp2mul_mont(t0, &coeff[0], t0); // t0 = coeff0*(X+Z)
- fp2mul_mont(t1, &coeff[1], t1); // t1 = coeff1*(X-Z)
- fp2add(t0, t1, t2); // t2 = coeff0*(X+Z) + coeff1*(X-Z)
- fp2sub(t1, t0, t0); // t0 = coeff1*(X-Z) - coeff0*(X+Z)
- fp2sqr_mont(t2, t2); // t2 = [coeff0*(X+Z) + coeff1*(X-Z)]^2
- fp2sqr_mont(t0, t0); // t0 = [coeff1*(X-Z) - coeff0*(X+Z)]^2
- fp2mul_mont(&Q->X, t2, &Q->X); // X3final = X*[coeff0*(X+Z) + coeff1*(X-Z)]^2
- fp2mul_mont(&Q->Z, t0, &Q->Z); // Z3final = Z*[coeff1*(X-Z) - coeff0*(X+Z)]^2
-}
-
-void inv_3_way(f2elm_t *z1, f2elm_t *z2, f2elm_t *z3) { // 3-way simultaneous inversion
- // Input: z1,z2,z3
- // Output: 1/z1,1/z2,1/z3 (override inputs).
- f2elm_t _t0, _t1, _t2, _t3;
- f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2, *t3=&_t3;
-
- fp2mul_mont(z1, z2, t0); // t0 = z1*z2
- fp2mul_mont(z3, t0, t1); // t1 = z1*z2*z3
- fp2inv_mont(t1); // t1 = 1/(z1*z2*z3)
- fp2mul_mont(z3, t1, t2); // t2 = 1/(z1*z2)
- fp2mul_mont(t2, z2, t3); // t3 = 1/z1
- fp2mul_mont(t2, z1, z2); // z2 = 1/z2
- fp2mul_mont(t0, t1, z3); // z3 = 1/z3
- fp2copy(t3, z1); // z1 = 1/z1
-}
-
-void get_A(const f2elm_t *xP, const f2elm_t *xQ, const f2elm_t *xR, f2elm_t *A) { // Given the x-coordinates of P, Q, and R, returns the value A corresponding to the Montgomery curve E_A: y^2=x^3+A*x^2+x such that R=Q-P on E_A.
- // Input: the x-coordinates xP, xQ, and xR of the points P, Q and R.
- // Output: the coefficient A corresponding to the curve E_A: y^2=x^3+A*x^2+x.
- f2elm_t _t0, _t1, one = {0};
- f2elm_t *t0=&_t0, *t1=&_t1;
-
- fpcopy((const digit_t *) &Montgomery_one, one.e[0]);
- fp2add(xP, xQ, t1); // t1 = xP+xQ
- fp2mul_mont(xP, xQ, t0); // t0 = xP*xQ
- fp2mul_mont(xR, t1, A); // A = xR*t1
- fp2add(t0, A, A); // A = A+t0
- fp2mul_mont(t0, xR, t0); // t0 = t0*xR
- fp2sub(A, &one, A); // A = A-1
- fp2add(t0, t0, t0); // t0 = t0+t0
- fp2add(t1, xR, t1); // t1 = t1+xR
- fp2add(t0, t0, t0); // t0 = t0+t0
- fp2sqr_mont(A, A); // A = A^2
- fp2inv_mont(t0); // t0 = 1/t0
- fp2mul_mont(A, t0, A); // A = A*t0
- fp2sub(A, t1, A); // Afinal = A-t1
-}
-
-void j_inv(const f2elm_t *A, const f2elm_t *C, f2elm_t *jinv) { // Computes the j-invariant of a Montgomery curve with projective constant.
- // Input: A,C in GF(p^2).
- // Output: j=256*(A^2-3*C^2)^3/(C^4*(A^2-4*C^2)), which is the j-invariant of the Montgomery curve B*y^2=x^3+(A/C)*x^2+x or (equivalently) j-invariant of B'*y^2=C*x^3+A*x^2+C*x.
- f2elm_t _t0, _t1;
- f2elm_t *t0=&_t0, *t1=&_t1;
-
- fp2sqr_mont(A, jinv); // jinv = A^2
- fp2sqr_mont(C, t1); // t1 = C^2
- fp2add(t1, t1, t0); // t0 = t1+t1
- fp2sub(jinv, t0, t0); // t0 = jinv-t0
- fp2sub(t0, t1, t0); // t0 = t0-t1
- fp2sub(t0, t1, jinv); // jinv = t0-t1
- fp2sqr_mont(t1, t1); // t1 = t1^2
- fp2mul_mont(jinv, t1, jinv); // jinv = jinv*t1
- fp2add(t0, t0, t0); // t0 = t0+t0
- fp2add(t0, t0, t0); // t0 = t0+t0
- fp2sqr_mont(t0, t1); // t1 = t0^2
- fp2mul_mont(t0, t1, t0); // t0 = t0*t1
- fp2add(t0, t0, t0); // t0 = t0+t0
- fp2add(t0, t0, t0); // t0 = t0+t0
- fp2inv_mont(jinv); // jinv = 1/jinv
- fp2mul_mont(jinv, t0, jinv); // jinv = t0*jinv
-}
-
-void xDBLADD(point_proj_t P, point_proj_t Q, const f2elm_t *xPQ, const f2elm_t *A24) { // Simultaneous doubling and differential addition.
- // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, affine difference xPQ=x(P-Q) and Montgomery curve constant A24=(A+2)/4.
- // Output: projective Montgomery points P <- 2*P = (X2P:Z2P) such that x(2P)=X2P/Z2P, and Q <- P+Q = (XQP:ZQP) such that x(Q+P)=XQP/ZQP.
- f2elm_t _t0, _t1, _t2;
- f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2;
-
- fp2add(&P->X, &P->Z, t0); // t0 = XP+ZP
- fp2sub(&P->X, &P->Z, t1); // t1 = XP-ZP
- fp2sqr_mont(t0, &P->X); // XP = (XP+ZP)^2
- fp2sub(&Q->X, &Q->Z, t2); // t2 = XQ-ZQ
- fp2correction(t2);
- fp2add(&Q->X, &Q->Z, &Q->X); // XQ = XQ+ZQ
- fp2mul_mont(t0, t2, t0); // t0 = (XP+ZP)*(XQ-ZQ)
- fp2sqr_mont(t1, &P->Z); // ZP = (XP-ZP)^2
- fp2mul_mont(t1, &Q->X, t1); // t1 = (XP-ZP)*(XQ+ZQ)
- fp2sub(&P->X, &P->Z, t2); // t2 = (XP+ZP)^2-(XP-ZP)^2
- fp2mul_mont(&P->X, &P->Z, &P->X); // XP = (XP+ZP)^2*(XP-ZP)^2
- fp2mul_mont(t2, A24, &Q->X); // XQ = A24*[(XP+ZP)^2-(XP-ZP)^2]
- fp2sub(t0, t1, &Q->Z); // ZQ = (XP+ZP)*(XQ-ZQ)-(XP-ZP)*(XQ+ZQ)
- fp2add(&Q->X, &P->Z, &P->Z); // ZP = A24*[(XP+ZP)^2-(XP-ZP)^2]+(XP-ZP)^2
- fp2add(t0, t1, &Q->X); // XQ = (XP+ZP)*(XQ-ZQ)+(XP-ZP)*(XQ+ZQ)
- fp2mul_mont(&P->Z, t2, &P->Z); // ZP = [A24*[(XP+ZP)^2-(XP-ZP)^2]+(XP-ZP)^2]*[(XP+ZP)^2-(XP-ZP)^2]
- fp2sqr_mont(&Q->Z, &Q->Z); // ZQ = [(XP+ZP)*(XQ-ZQ)-(XP-ZP)*(XQ+ZQ)]^2
- fp2sqr_mont(&Q->X, &Q->X); // XQ = [(XP+ZP)*(XQ-ZQ)+(XP-ZP)*(XQ+ZQ)]^2
- fp2mul_mont(&Q->Z, xPQ, &Q->Z); // ZQ = xPQ*[(XP+ZP)*(XQ-ZQ)-(XP-ZP)*(XQ+ZQ)]^2
-}
-
-static void swap_points(point_proj_t P, point_proj_t Q, const digit_t option) { // Swap points.
- // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then P <- Q and Q <- P
- for (unsigned int i = 0; i < NWORDS_FIELD; i++) {
- digit_t temp = option & (P->X.e[0][i] ^ Q->X.e[0][i]);
- P->X.e[0][i] = temp ^ P->X.e[0][i];
- Q->X.e[0][i] = temp ^ Q->X.e[0][i];
- temp = option & (P->Z.e[0][i] ^ Q->Z.e[0][i]);
- P->Z.e[0][i] = temp ^ P->Z.e[0][i];
- Q->Z.e[0][i] = temp ^ Q->Z.e[0][i];
- temp = option & (P->X.e[1][i] ^ Q->X.e[1][i]);
- P->X.e[1][i] = temp ^ P->X.e[1][i];
- Q->X.e[1][i] = temp ^ Q->X.e[1][i];
- temp = option & (P->Z.e[1][i] ^ Q->Z.e[1][i]);
- P->Z.e[1][i] = temp ^ P->Z.e[1][i];
- Q->Z.e[1][i] = temp ^ Q->Z.e[1][i];
- }
-}
-
-void LADDER3PT(const f2elm_t *xP, const f2elm_t *xQ, const f2elm_t *xPQ, const digit_t *m, const unsigned int AliceOrBob, point_proj_t R, const f2elm_t *A) {
- point_proj_t R0 = {0}, R2 = {0};
- f2elm_t _A24 = {0};
- f2elm_t *A24=&_A24;
- digit_t mask;
- int i, nbits, swap, prevbit = 0;
-
- if (AliceOrBob == ALICE) {
- nbits = OALICE_BITS;
- } else {
- nbits = OBOB_BITS - 1;
- }
-
- // Initializing constant
- fpcopy((const digit_t *) &Montgomery_one, A24->e[0]);
- fp2add(A24, A24, A24);
- fp2add(A, A24, A24);
- fp2div2(A24, A24);
- fp2div2(A24, A24); // A24 = (A+2)/4
-
- // Initializing points
- fp2copy(xQ, &R0->X);
- fpcopy((const digit_t *) &Montgomery_one, (digit_t *) R0->Z.e);
- fp2copy(xPQ, &R2->X);
- fpcopy((const digit_t *) &Montgomery_one, (digit_t *) R2->Z.e);
- fp2copy(xP, &R->X);
- fpcopy((const digit_t *) &Montgomery_one, (digit_t *) R->Z.e);
- fpzero((digit_t *) (R->Z.e)[1]);
-
- // Main loop
- for (i = 0; i < nbits; i++) {
- int bit = (m[i >> LOG2RADIX] >> (i & (RADIX - 1))) & 1;
- swap = bit ^ prevbit;
- prevbit = bit;
- mask = 0 - (digit_t) swap;
-
- swap_points(R, R2, mask);
- xDBLADD(R0, R2, &R->X, A24);
- fp2mul_mont(&R2->X, &R->Z, &R2->X);
- }
- swap = 0 ^ prevbit;
- mask = 0 - (digit_t) swap;
- swap_points(R, R2, mask);
-}
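
swap_points() and LADDER3PT() above rely on a masked XOR swap: the mask is either 0 or an all-ones word, so the same instructions execute whether or not the points are exchanged. A minimal standalone sketch of that idiom on single words (not part of the removed library):

    /* sketch: branch-free conditional swap via an all-ones mask */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t digit_t;

    static void cswap(digit_t *a, digit_t *b, digit_t option) {
        digit_t temp = option & (*a ^ *b); /* 0 or the XOR difference */
        *a ^= temp;
        *b ^= temp;
    }

    int main(void) {
        digit_t a = 0x1111, b = 0x2222;

        cswap(&a, &b, 0);                  /* option = 0: nothing moves */
        assert(a == 0x1111 && b == 0x2222);

        digit_t swap = 1;                  /* a single 0/1 bit, as in LADDER3PT */
        cswap(&a, &b, 0 - (digit_t) swap); /* mask = 0xFF...FF: values are exchanged */
        assert(a == 0x2222 && b == 0x1111);

        printf("masked conditional swap behaves as expected\n");
        return 0;
    }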
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.h
deleted file mode 100644
index 1196bff2c0..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fips202.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef FIPS202_H
-#define FIPS202_H
-
-#define SHAKE256_RATE 136
-
-/** Data structure for the state of the SHAKE-256 non-incremental hashing API. */
-typedef struct {
-/** Internal state. */
- uint64_t ctx[25];
-} shake256_ctx;
-
-void shake256(uint8_t *output, size_t outlen, const uint8_t *input, size_t inlen);
-
-#endif // FIPS202_H
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fp.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fp.c
deleted file mode 100644
index 0e09ce25a0..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fp.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: Portable C and x86_64 ASM functions for modular arithmetic for P434
-*********************************************************************************************/
-
-#include "P434_internal.h"
-
-// Modular addition, c = a+b mod p434.
-// Inputs: a, b in [0, 2*p434-1]
-// Output: c in [0, 2*p434-1]
-void fpadd434(const digit_t *a, const digit_t *b, digit_t *c) {
-#if defined(S2N_SIKEP434R2_ASM)
- if (s2n_sikep434r2_asm_is_enabled()) {
- fpadd434_asm(a, b, c);
- return;
- }
-#endif
-
- unsigned int i, carry = 0;
- digit_t mask;
-
- for (i = 0; i < NWORDS_FIELD; i++) {
- ADDC(carry, a[i], b[i], carry, c[i]);
- }
-
- carry = 0;
- for (i = 0; i < NWORDS_FIELD; i++) {
- SUBC(carry, c[i], ((const digit_t *) p434x2)[i], carry, c[i]);
- }
- mask = 0 - (digit_t) carry;
-
- carry = 0;
- for (i = 0; i < NWORDS_FIELD; i++) {
- ADDC(carry, c[i], ((const digit_t *) p434x2)[i] & mask, carry, c[i]);
- }
-}
-
-// Modular subtraction, c = a-b mod p434.
-// Inputs: a, b in [0, 2*p434-1]
-// Output: c in [0, 2*p434-1]
-void fpsub434(const digit_t *a, const digit_t *b, digit_t *c) {
-#if defined(S2N_SIKEP434R2_ASM)
- if (s2n_sikep434r2_asm_is_enabled()) {
- fpsub434_asm(a, b, c);
- return;
- }
-#endif
-
- unsigned int i, borrow = 0;
- digit_t mask;
-
- for (i = 0; i < NWORDS_FIELD; i++) {
- SUBC(borrow, a[i], b[i], borrow, c[i]);
- }
- mask = 0 - (digit_t) borrow;
-
- borrow = 0;
- for (i = 0; i < NWORDS_FIELD; i++) {
- ADDC(borrow, c[i], ((const digit_t *) p434x2)[i] & mask, borrow, c[i]);
- }
-}
-
-// Modular negation, a = -a mod p434.
-// Input/output: a in [0, 2*p434-1]
-void fpneg434(digit_t *a) {
- unsigned int i, borrow = 0;
-
- for (i = 0; i < NWORDS_FIELD; i++) {
- SUBC(borrow, ((const digit_t *) p434x2)[i], a[i], borrow, a[i]);
- }
-}
-
-// Modular division by two, c = a/2 mod p434.
-// Input : a in [0, 2*p434-1]
-// Output: c in [0, 2*p434-1]
-void fpdiv2_434(const digit_t *a, digit_t *c) {
- unsigned int i, carry = 0;
- digit_t mask;
-
- mask = 0 - (digit_t)(a[0] & 1); // If a is odd compute a+p434
- for (i = 0; i < NWORDS_FIELD; i++) {
- ADDC(carry, a[i], ((const digit_t *) p434)[i] & mask, carry, c[i]);
- }
-
- mp_shiftr1(c, NWORDS_FIELD);
-}
-
-// Modular correction to reduce field element a in [0, 2*p434-1] to [0, p434-1].
-void fpcorrection434(digit_t *a) {
- unsigned int i, borrow = 0;
- digit_t mask;
-
- for (i = 0; i < NWORDS_FIELD; i++) {
- SUBC(borrow, a[i], ((const digit_t *) p434)[i], borrow, a[i]);
- }
- mask = 0 - (digit_t) borrow;
-
- borrow = 0;
- for (i = 0; i < NWORDS_FIELD; i++) {
- ADDC(borrow, a[i], ((const digit_t *) p434)[i] & mask, borrow, a[i]);
- }
-}
-
-// Digit multiplication, digit * digit -> 2-digit result
-void digit_x_digit(const digit_t a, const digit_t b, digit_t *c) {
- register digit_t al, ah, bl, bh, temp;
- digit_t albl, albh, ahbl, ahbh, res1, res2, res3, carry;
- digit_t mask_low = (digit_t)(-1) >> (sizeof(digit_t) * 4), mask_high = (digit_t)(-1) << (sizeof(digit_t) * 4);
-
- al = a & mask_low; // Low part
- ah = a >> (sizeof(digit_t) * 4); // High part
- bl = b & mask_low;
- bh = b >> (sizeof(digit_t) * 4);
-
- albl = al * bl;
- albh = al * bh;
- ahbl = ah * bl;
- ahbh = ah * bh;
- c[0] = albl & mask_low; // C00
-
- res1 = albl >> (sizeof(digit_t) * 4);
- res2 = ahbl & mask_low;
- res3 = albh & mask_low;
- temp = res1 + res2 + res3;
- carry = temp >> (sizeof(digit_t) * 4);
- c[0] ^= temp << (sizeof(digit_t) * 4); // C01
-
- res1 = ahbl >> (sizeof(digit_t) * 4);
- res2 = albh >> (sizeof(digit_t) * 4);
- res3 = ahbh & mask_low;
- temp = res1 + res2 + res3 + carry;
- c[1] = temp & mask_low; // C10
- carry = temp & mask_high;
- c[1] ^= (ahbh & mask_high) + carry; // C11
-}
-
-// Multiprecision comba multiply, c = a*b, where lng(a) = lng(b) = nwords.
-void mp_mul(const digit_t *a, const digit_t *b, digit_t *c, const unsigned int nwords) {
-#if defined(S2N_SIKEP434R2_ASM)
- if (s2n_sikep434r2_asm_is_enabled()) {
- UNREFERENCED_PARAMETER(nwords);
- mul434_asm(a, b, c);
- return;
- }
-#endif
-
- unsigned int i, j, carry;
- digit_t t = 0, u = 0, v = 0, UV[2];
-
- for (i = 0; i < nwords; i++) {
- for (j = 0; j <= i; j++) {
- MUL(a[j], b[i - j], UV + 1, UV[0]);
- ADDC(0, UV[0], v, carry, v);
- ADDC(carry, UV[1], u, carry, u);
- t += carry;
- }
- c[i] = v;
- v = u;
- u = t;
- t = 0;
- }
-
- for (i = nwords; i < 2 * nwords - 1; i++) {
- for (j = i - nwords + 1; j < nwords; j++) {
- MUL(a[j], b[i - j], UV + 1, UV[0]);
- ADDC(0, UV[0], v, carry, v);
- ADDC(carry, UV[1], u, carry, u);
- t += carry;
- }
- c[i] = v;
- v = u;
- u = t;
- t = 0;
- }
- c[2 * nwords - 1] = v;
-}
-
-// Efficient Montgomery reduction using comba and exploiting the special form of the prime p434.
-// mc = ma*R^-1 mod p434x2, where R = 2^448.
-// If ma < 2^448*p434, the output mc is in the range [0, 2*p434-1].
-// ma is assumed to be in Montgomery representation.
-void rdc_mont(const digit_t *ma, digit_t *mc) {
-#if defined(S2N_SIKEP434R2_ASM)
- if (s2n_sikep434r2_asm_is_enabled()) {
- rdc434_asm(ma, mc);
- return;
- }
-#endif
-
- unsigned int i, j, carry, count = p434_ZERO_WORDS;
- digit_t UV[2], t = 0, u = 0, v = 0;
-
- for (i = 0; i < NWORDS_FIELD; i++) {
- mc[i] = 0;
- }
-
- for (i = 0; i < NWORDS_FIELD; i++) {
- for (j = 0; j < i; j++) {
- if (j < (i - p434_ZERO_WORDS + 1)) {
- MUL(mc[j], ((const digit_t *) p434p1)[i - j], UV + 1, UV[0]);
- ADDC(0, UV[0], v, carry, v);
- ADDC(carry, UV[1], u, carry, u);
- t += carry;
- }
- }
- ADDC(0, v, ma[i], carry, v);
- ADDC(carry, u, 0, carry, u);
- t += carry;
- mc[i] = v;
- v = u;
- u = t;
- t = 0;
- }
-
- for (i = NWORDS_FIELD; i < 2 * NWORDS_FIELD - 1; i++) {
- if (count > 0) {
- count -= 1;
- }
- for (j = i - NWORDS_FIELD + 1; j < NWORDS_FIELD; j++) {
- if (j < (NWORDS_FIELD - count)) {
- MUL(mc[j], ((const digit_t *) p434p1)[i - j], UV + 1, UV[0]);
- ADDC(0, UV[0], v, carry, v);
- ADDC(carry, UV[1], u, carry, u);
- t += carry;
- }
- }
- ADDC(0, v, ma[i], carry, v);
- ADDC(carry, u, 0, carry, u);
- t += carry;
- mc[i - NWORDS_FIELD] = v;
- v = u;
- u = t;
- t = 0;
- }
-
- /* `carry` isn't read after this, but it's still a necessary argument to the macro */
- /* cppcheck-suppress unreadVariable */
- ADDC(0, v, ma[2 * NWORDS_FIELD - 1], carry, v);
- mc[NWORDS_FIELD - 1] = v;
-}
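
fpadd434() and fpcorrection434() above keep results in range without branching: they unconditionally subtract (or add) the modulus and then undo that step under a mask derived from the carry/borrow. A toy single-word sketch of the same pattern, using a small stand-in modulus rather than p434:

    /* sketch: branch-free modular addition keeping results in [0, 2p-1] */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t toy_fpadd(uint64_t a, uint64_t b, uint64_t p) {
        uint64_t twop = 2 * p;
        uint64_t sum = a + b;           /* a, b in [0, 2p-1]; no overflow for a small p */
        uint64_t diff = sum - twop;     /* speculative reduction */
        uint64_t borrow = (sum < twop); /* 1 iff the subtraction underflowed */
        uint64_t mask = 0 - borrow;     /* all ones iff we subtracted too much */
        return diff + (twop & mask);    /* add 2p back only in that case */
    }

    int main(void) {
        const uint64_t p = 1000003;     /* toy modulus standing in for p434 */
        for (uint64_t a = 0; a < 2 * p; a += 12345) {
            for (uint64_t b = 0; b < 2 * p; b += 54321) {
                uint64_t c = toy_fpadd(a, b, p);
                assert(c < 2 * p && c % p == (a + b) % p);
            }
        }
        printf("branch-free modular addition stays in [0, 2p-1]\n");
        return 0;
    }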
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fpx.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fpx.c
deleted file mode 100644
index e5b356b93b..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/fpx.c
+++ /dev/null
@@ -1,387 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: core functions over GF(p) and GF(p^2)
-*********************************************************************************************/
-
-// Conversion of GF(p^2) element from Montgomery to standard representation, and encoding by removing leading 0 bytes
-void fp2_encode(const f2elm_t *x, unsigned char *enc) {
- unsigned int i;
- f2elm_t t;
-
- from_fp2mont(x, &t);
- for (i = 0; i < FP2_ENCODED_BYTES / 2; i++) {
- enc[i] = ((unsigned char *) t.e)[i];
- enc[i + FP2_ENCODED_BYTES / 2] = ((unsigned char *) t.e)[i + MAXBITS_FIELD / 8];
- }
-}
-
-// Parse byte sequence back into GF(p^2) element, and conversion to Montgomery representation
-void fp2_decode(const unsigned char *enc, f2elm_t *x) {
- unsigned int i;
-
- for (i = 0; i < 2 * (MAXBITS_FIELD / 8); i++)
- ((unsigned char *) x->e)[i] = 0;
- for (i = 0; i < FP2_ENCODED_BYTES / 2; i++) {
- ((unsigned char *) x->e)[i] = enc[i];
- ((unsigned char *) x->e)[i + MAXBITS_FIELD / 8] = enc[i + FP2_ENCODED_BYTES / 2];
- }
- to_fp2mont(x, x);
-}
-
-// Copy a field element, c = a.
-__inline void fpcopy(const felm_t a, felm_t c) {
- unsigned int i;
-
- for (i = 0; i < NWORDS_FIELD; i++)
- c[i] = a[i];
-}
-
-// Zero a field element, a = 0.
-__inline void fpzero(felm_t a) {
- unsigned int i;
-
- for (i = 0; i < NWORDS_FIELD; i++)
- a[i] = 0;
-}
-
-// Conversion to Montgomery representation,
-// mc = a*R^2*R^(-1) mod p = a*R mod p, where a in [0, p-1].
-// The Montgomery constant R^2 mod p is the global value "Montgomery_R2".
-void to_mont(const felm_t a, felm_t mc) {
- fpmul_mont(a, (const digit_t *) &Montgomery_R2, mc);
-}
-
-// Conversion from Montgomery representation to standard representation,
-// c = ma*R^(-1) mod p = a mod p, where ma in [0, p-1].
-void from_mont(const felm_t ma, felm_t c) {
- digit_t one[NWORDS_FIELD] = {0};
-
- one[0] = 1;
- fpmul_mont(ma, one, c);
- fpcorrection(c);
-}
-
-// Copy wordsize digits, c = a, where lng(a) = nwords.
-void copy_words(const digit_t *a, digit_t *c, const unsigned int nwords) {
- unsigned int i;
-
- for (i = 0; i < nwords; i++)
- c[i] = a[i];
-}
-
-// Multiprecision multiplication, c = a*b mod p.
-void fpmul_mont(const felm_t ma, const felm_t mb, felm_t mc) {
- dfelm_t temp = {0};
-
- mp_mul(ma, mb, temp, NWORDS_FIELD);
- rdc_mont(temp, mc);
-}
-
-// Multiprecision squaring, c = a^2 mod p.
-void fpsqr_mont(const felm_t ma, felm_t mc) {
- dfelm_t temp = {0};
-
- mp_mul(ma, ma, temp, NWORDS_FIELD);
- rdc_mont(temp, mc);
-}
-
-// Field inversion using Montgomery arithmetic, a = a^(-1)*R mod p.
-void fpinv_mont(felm_t a) {
- felm_t tt;
-
- fpcopy(a, tt);
- fpinv_chain_mont(tt);
- fpsqr_mont(tt, tt);
- fpsqr_mont(tt, tt);
- fpmul_mont(a, tt, a);
-}
-
-// Copy a GF(p^2) element, c = a.
-void fp2copy(const f2elm_t *a, f2elm_t *c) {
- fpcopy(a->e[0], c->e[0]);
- fpcopy(a->e[1], c->e[1]);
-}
-
-// Zero a GF(p^2) element, a = 0.
-void fp2zero(f2elm_t *a) {
- fpzero(a->e[0]);
- fpzero(a->e[1]);
-}
-
-// GF(p^2) negation, a = -a in GF(p^2).
-void fp2neg(f2elm_t *a) {
- fpneg(a->e[0]);
- fpneg(a->e[1]);
-}
-
-// GF(p^2) addition, c = a+b in GF(p^2).
-__inline void fp2add(const f2elm_t *a, const f2elm_t *b, f2elm_t *c) {
- fpadd(a->e[0], b->e[0], c->e[0]);
- fpadd(a->e[1], b->e[1], c->e[1]);
-}
-
-// GF(p^2) subtraction, c = a-b in GF(p^2).
-__inline void fp2sub(const f2elm_t *a, const f2elm_t *b, f2elm_t *c) {
- fpsub(a->e[0], b->e[0], c->e[0]);
- fpsub(a->e[1], b->e[1], c->e[1]);
-}
-
-// GF(p^2) division by two, c = a/2 in GF(p^2).
-void fp2div2(const f2elm_t *a, f2elm_t *c) {
- fpdiv2(a->e[0], c->e[0]);
- fpdiv2(a->e[1], c->e[1]);
-}
-
-// Modular correction, a = a in GF(p^2).
-void fp2correction(f2elm_t *a) {
- fpcorrection(a->e[0]);
- fpcorrection(a->e[1]);
-}
-
-// Multiprecision addition, c = a+b.
-__inline static void mp_addfast(const digit_t *a, const digit_t *b, digit_t *c) {
-#if defined(S2N_SIKEP434R2_ASM)
- if (s2n_sikep434r2_asm_is_enabled()) {
- mp_add_asm(a, b, c);
- return;
- }
-#endif
-
- mp_add(a, b, c, NWORDS_FIELD);
-}
-
-// GF(p^2) squaring using Montgomery arithmetic, c = a^2 in GF(p^2).
-// Inputs: a = a0+a1*i, where a0, a1 are in [0, 2*p-1]
-// Output: c = c0+c1*i, where c0, c1 are in [0, 2*p-1]
-void fp2sqr_mont(const f2elm_t *a, f2elm_t *c) {
- felm_t t1, t2, t3;
-
- mp_addfast(a->e[0], a->e[1], t1); // t1 = a0+a1
- fpsub(a->e[0], a->e[1], t2); // t2 = a0-a1
- mp_addfast(a->e[0], a->e[0], t3); // t3 = 2a0
- fpmul_mont(t1, t2, c->e[0]); // c0 = (a0+a1)(a0-a1)
- fpmul_mont(t3, a->e[1], c->e[1]); // c1 = 2a0*a1
-}
-
-// Multiprecision subtraction, c = a-b, where lng(a) = lng(b) = nwords. Returns the borrow bit.
-unsigned int mp_sub(const digit_t *a, const digit_t *b, digit_t *c, const unsigned int nwords) {
- unsigned int i, borrow = 0;
-
- for (i = 0; i < nwords; i++)
- SUBC(borrow, a[i], b[i], borrow, c[i]);
-
- return borrow;
-}
-
-// Multiprecision subtraction followed by addition with p*2^MAXBITS_FIELD, c = a-b+(p*2^MAXBITS_FIELD) if a-b < 0, otherwise c=a-b.
-__inline static void mp_subaddfast(const digit_t *a, const digit_t *b, digit_t *c) {
-#if defined(S2N_SIKEP434R2_ASM)
- if (s2n_sikep434r2_asm_is_enabled()) {
- mp_subaddx2_asm(a, b, c);
- return;
- }
-#endif
-
- felm_t t1;
-
- digit_t mask = 0 - (digit_t) mp_sub(a, b, c, 2 * NWORDS_FIELD);
- for (int i = 0; i < NWORDS_FIELD; i++)
- t1[i] = ((const digit_t *) PRIME)[i] & mask;
- mp_addfast((digit_t *) &c[NWORDS_FIELD], t1, (digit_t *) &c[NWORDS_FIELD]);
-}
-
-// Multiprecision subtraction, c = c-a-b, where lng(a) = lng(b) = 2*NWORDS_FIELD.
-__inline static void mp_dblsubfast(const digit_t *a, const digit_t *b, digit_t *c) {
-#if defined(S2N_SIKEP434R2_ASM)
- if (s2n_sikep434r2_asm_is_enabled()) {
- mp_dblsubx2_asm(a, b, c);
- return;
- }
-#endif
-
- mp_sub(c, a, c, 2 * NWORDS_FIELD);
- mp_sub(c, b, c, 2 * NWORDS_FIELD);
-}
-
-// GF(p^2) multiplication using Montgomery arithmetic, c = a*b in GF(p^2).
-// Inputs: a = a0+a1*i and b = b0+b1*i, where a0, a1, b0, b1 are in [0, 2*p-1]
-// Output: c = c0+c1*i, where c0, c1 are in [0, 2*p-1]
-void fp2mul_mont(const f2elm_t *a, const f2elm_t *b, f2elm_t *c) {
- felm_t t1, t2;
- dfelm_t tt1, tt2, tt3;
-
- mp_addfast(a->e[0], a->e[1], t1); // t1 = a0+a1
- mp_addfast(b->e[0], b->e[1], t2); // t2 = b0+b1
- mp_mul(a->e[0], b->e[0], tt1, NWORDS_FIELD); // tt1 = a0*b0
- mp_mul(a->e[1], b->e[1], tt2, NWORDS_FIELD); // tt2 = a1*b1
- mp_mul(t1, t2, tt3, NWORDS_FIELD); // tt3 = (a0+a1)*(b0+b1)
- mp_dblsubfast(tt1, tt2, tt3); // tt3 = (a0+a1)*(b0+b1) - a0*b0 - a1*b1
- mp_subaddfast(tt1, tt2, tt1); // tt1 = a0*b0 - a1*b1 + p*2^MAXBITS_FIELD if a0*b0 - a1*b1 < 0, else tt1 = a0*b0 - a1*b1
- rdc_mont(tt3, c->e[1]); // c[1] = (a0+a1)*(b0+b1) - a0*b0 - a1*b1
- rdc_mont(tt1, c->e[0]); // c[0] = a0*b0 - a1*b1
-}
-
-// Chain to compute a^(p-3)/4 using Montgomery arithmetic.
-void fpinv_chain_mont(felm_t a) {
- unsigned int i, j;
-
- felm_t t[31], tt;
-
- // Precomputed table
- fpsqr_mont(a, tt);
- fpmul_mont(a, tt, t[0]);
- for (i = 0; i <= 29; i++)
- fpmul_mont(t[i], tt, t[i + 1]);
-
- fpcopy(a, tt);
- for (i = 0; i < 7; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[5], tt, tt);
- for (i = 0; i < 10; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[14], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[3], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[23], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[13], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[24], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[7], tt, tt);
- for (i = 0; i < 8; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[12], tt, tt);
- for (i = 0; i < 8; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[30], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[1], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[30], tt, tt);
- for (i = 0; i < 7; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[21], tt, tt);
- for (i = 0; i < 9; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[2], tt, tt);
- for (i = 0; i < 9; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[19], tt, tt);
- for (i = 0; i < 9; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[1], tt, tt);
- for (i = 0; i < 7; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[24], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[26], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[16], tt, tt);
- for (i = 0; i < 7; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[10], tt, tt);
- for (i = 0; i < 7; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[6], tt, tt);
- for (i = 0; i < 7; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[0], tt, tt);
- for (i = 0; i < 9; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[20], tt, tt);
- for (i = 0; i < 8; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[9], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[25], tt, tt);
- for (i = 0; i < 9; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[30], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[26], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(a, tt, tt);
- for (i = 0; i < 7; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[28], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[6], tt, tt);
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[10], tt, tt);
- for (i = 0; i < 9; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[22], tt, tt);
- for (j = 0; j < 35; j++) {
- for (i = 0; i < 6; i++)
- fpsqr_mont(tt, tt);
- fpmul_mont(t[30], tt, tt);
- }
- fpcopy(tt, a);
-}
-
-// GF(p^2) inversion using Montgomery arithmetic, a = (a0-i*a1)/(a0^2+a1^2).
-void fp2inv_mont(f2elm_t *a) {
- f2elm_t t1;
-
- fpsqr_mont(a->e[0], t1.e[0]); // t10 = a0^2
- fpsqr_mont(a->e[1], t1.e[1]); // t11 = a1^2
- fpadd(t1.e[0], t1.e[1], t1.e[0]); // t10 = a0^2+a1^2
- fpinv_mont(t1.e[0]); // t10 = (a0^2+a1^2)^-1
- fpneg(a->e[1]); // a = a0-i*a1
- fpmul_mont(a->e[0], t1.e[0], a->e[0]);
- fpmul_mont(a->e[1], t1.e[0], a->e[1]); // a = (a0-i*a1)*(a0^2+a1^2)^-1
-}
-
-// Conversion of a GF(p^2) element to Montgomery representation,
-// mc_i = a_i*R^2*R^(-1) = a_i*R in GF(p^2).
-void to_fp2mont(const f2elm_t *a, f2elm_t *mc) {
- to_mont(a->e[0], mc->e[0]);
- to_mont(a->e[1], mc->e[1]);
-}
-
-// Conversion of a GF(p^2) element from Montgomery representation to standard representation,
-// c_i = ma_i*R^(-1) = a_i in GF(p^2).
-void from_fp2mont(const f2elm_t *ma, f2elm_t *c) {
- from_mont(ma->e[0], c->e[0]);
- from_mont(ma->e[1], c->e[1]);
-}
-
-// Multiprecision addition, c = a+b, where lng(a) = lng(b) = nwords. Returns the carry bit.
-unsigned int mp_add(const digit_t *a, const digit_t *b, digit_t *c, const unsigned int nwords) {
- unsigned int i, carry = 0;
-
- for (i = 0; i < nwords; i++) {
- /* cppcheck-suppress shiftTooManyBits */
- /* cppcheck-suppress unmatchedSuppression */
- ADDC(carry, a[i], b[i], carry, c[i]);
- }
-
- return carry;
-}
-
-// Multiprecision right shift by one.
-void mp_shiftr1(digit_t *x, const unsigned int nwords) {
- unsigned int i;
-
- for (i = 0; i < nwords - 1; i++) {
- SHIFTR(x[i + 1], x[i], 1, x[i], RADIX);
- }
- x[nwords - 1] >>= 1;
-}
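
fp2mul_mont() above uses the standard three-multiplication trick for GF(p^2) with i^2 = -1: c0 = a0*b0 - a1*b1 and c1 = (a0+a1)*(b0+b1) - a0*b0 - a1*b1. A small standalone check against the schoolbook formula, over a toy prime rather than p434 and without Montgomery form:

    /* sketch: 3-multiplication GF(p^2) product vs. the schoolbook formula */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static const int64_t P = 10007; /* toy prime standing in for p434 */

    static int64_t modp(int64_t x) { return ((x % P) + P) % P; }

    int main(void) {
        int64_t a0 = 1234, a1 = 5678, b0 = 4321, b1 = 8765;

        /* Schoolbook: (a0 + a1*i)(b0 + b1*i) = (a0*b0 - a1*b1) + (a0*b1 + a1*b0)*i */
        int64_t c0_ref = modp(a0 * b0 - a1 * b1);
        int64_t c1_ref = modp(a0 * b1 + a1 * b0);

        /* Three multiplications, as in fp2mul_mont: tt1 = a0*b0, tt2 = a1*b1,
           tt3 = (a0+a1)*(b0+b1); then c1 = tt3 - tt1 - tt2 and c0 = tt1 - tt2. */
        int64_t tt1 = a0 * b0, tt2 = a1 * b1, tt3 = (a0 + a1) * (b0 + b1);
        int64_t c0 = modp(tt1 - tt2);
        int64_t c1 = modp(tt3 - tt1 - tt2);

        assert(c0 == c0_ref && c1 == c1_ref);
        printf("3-multiplication GF(p^2) product matches the schoolbook result\n");
        return 0;
    }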
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sidh.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sidh.c
deleted file mode 100644
index d3fdbe722c..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sidh.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: ephemeral supersingular isogeny Diffie-Hellman key exchange (SIDH)
-*********************************************************************************************/
-
-#include "../s2n_pq_random.h"
-#include "utils/s2n_safety.h"
-
-static void init_basis(const digit_t *gen, f2elm_t *XP, f2elm_t *XQ, f2elm_t *XR) { // Initialization of basis points
-
- fpcopy(gen, XP->e[0]);
- fpcopy(gen + NWORDS_FIELD, XP->e[1]);
- fpcopy(gen + 2 * NWORDS_FIELD, XQ->e[0]);
- fpcopy(gen + 3 * NWORDS_FIELD, XQ->e[1]);
- fpcopy(gen + 4 * NWORDS_FIELD, XR->e[0]);
- fpcopy(gen + 5 * NWORDS_FIELD, XR->e[1]);
-}
-
-int random_mod_order_A(unsigned char *random_digits) { // Generation of Alice's secret key
- // Outputs random value in [0, 2^eA - 1]
- GUARD_AS_POSIX(s2n_get_random_bytes(random_digits, SECRETKEY_A_BYTES));
- random_digits[SECRETKEY_A_BYTES - 1] &= MASK_ALICE; // Masking last byte
- return S2N_SUCCESS;
-}
-
-int random_mod_order_B(unsigned char *random_digits) { // Generation of Bob's secret key
- // Outputs random value in [0, 2^Floor(Log(2, oB)) - 1]
- GUARD_AS_POSIX(s2n_get_random_bytes(random_digits, SECRETKEY_B_BYTES));
- random_digits[SECRETKEY_B_BYTES - 1] &= MASK_BOB; // Masking last byte
- return S2N_SUCCESS;
-}
-
-int EphemeralKeyGeneration_A(const digit_t *PrivateKeyA, unsigned char *PublicKeyA) { // Alice's ephemeral public key generation
- // Input: a private key PrivateKeyA in the range [0, 2^eA - 1].
- // Output: the public key PublicKeyA consisting of 3 elements in GF(p^2) which are encoded by removing leading 0 bytes.
- point_proj_t R, phiP = {0}, phiQ = {0}, phiR = {0}, pts[MAX_INT_POINTS_ALICE];
- f2elm_t _XPA, _XQA, _XRA, coeff[3], _A24plus = {0}, _C24 = {0}, _A = {0};
- f2elm_t *XPA=&_XPA, *XQA=&_XQA, *XRA=&_XRA, *A24plus=&_A24plus, *C24=&_C24, *A=&_A;
- unsigned int i, row, m, index = 0, pts_index[MAX_INT_POINTS_ALICE], npts = 0, ii = 0;
-
- // Initialize basis points
- init_basis((const digit_t *) A_gen, XPA, XQA, XRA);
- init_basis((const digit_t *) B_gen, &phiP->X, &phiQ->X, &phiR->X);
- fpcopy((const digit_t *) &Montgomery_one, (phiP->Z.e)[0]);
- fpcopy((const digit_t *) &Montgomery_one, (phiQ->Z.e)[0]);
- fpcopy((const digit_t *) &Montgomery_one, (phiR->Z.e)[0]);
-
- // Initialize constants: A24plus = A+2C, C24 = 4C, where A=6, C=1
- fpcopy((const digit_t *) &Montgomery_one, A24plus->e[0]);
- fp2add(A24plus, A24plus, A24plus);
- fp2add(A24plus, A24plus, C24);
- fp2add(A24plus, C24, A);
- fp2add(C24, C24, A24plus);
-
- // Retrieve kernel point
- LADDER3PT(XPA, XQA, XRA, PrivateKeyA, ALICE, R, A);
-
- // Traverse tree
- index = 0;
- for (row = 1; row < MAX_Alice; row++) {
- while (index < MAX_Alice - row) {
- fp2copy(&R->X, &pts[npts]->X);
- fp2copy(&R->Z, &pts[npts]->Z);
- pts_index[npts++] = index;
- m = strat_Alice[ii++];
- xDBLe(R, R, A24plus, C24, (int) (2 * m));
- index += m;
- }
- get_4_isog(R, A24plus, C24, coeff);
-
- for (i = 0; i < npts; i++) {
- eval_4_isog(pts[i], coeff);
- }
- eval_4_isog(phiP, coeff);
- eval_4_isog(phiQ, coeff);
- eval_4_isog(phiR, coeff);
-
- fp2copy(&pts[npts - 1]->X, &R->X);
- fp2copy(&pts[npts - 1]->Z, &R->Z);
- index = pts_index[npts - 1];
- npts -= 1;
- }
-
- get_4_isog(R, A24plus, C24, coeff);
- eval_4_isog(phiP, coeff);
- eval_4_isog(phiQ, coeff);
- eval_4_isog(phiR, coeff);
-
- inv_3_way(&phiP->Z, &phiQ->Z, &phiR->Z);
- fp2mul_mont(&phiP->X, &phiP->Z, &phiP->X);
- fp2mul_mont(&phiQ->X, &phiQ->Z, &phiQ->X);
- fp2mul_mont(&phiR->X, &phiR->Z, &phiR->X);
-
- // Format public key
- fp2_encode(&phiP->X, PublicKeyA);
- fp2_encode(&phiQ->X, PublicKeyA + FP2_ENCODED_BYTES);
- fp2_encode(&phiR->X, PublicKeyA + 2 * FP2_ENCODED_BYTES);
-
- return 0;
-}
-
-int EphemeralKeyGeneration_B(const digit_t *PrivateKeyB, unsigned char *PublicKeyB) { // Bob's ephemeral public key generation
- // Input: a private key PrivateKeyB in the range [0, 2^Floor(Log(2,oB)) - 1].
- // Output: the public key PublicKeyB consisting of 3 elements in GF(p^2) which are encoded by removing leading 0 bytes.
- point_proj_t R, phiP = {0}, phiQ = {0}, phiR = {0}, pts[MAX_INT_POINTS_BOB];
- f2elm_t _XPB, _XQB, _XRB, coeff[3], _A24plus = {0}, _A24minus = {0}, _A = {0};
- f2elm_t *XPB=&_XPB, *XQB=&_XQB, *XRB=&_XRB, *A24plus=&_A24plus, *A24minus=&_A24minus, *A=&_A;
- unsigned int i, row, m, index = 0, pts_index[MAX_INT_POINTS_BOB], npts = 0, ii = 0;
-
- // Initialize basis points
- init_basis((const digit_t *) B_gen, XPB, XQB, XRB);
- init_basis((const digit_t *) A_gen, &phiP->X, &phiQ->X, &phiR->X);
- fpcopy((const digit_t *) &Montgomery_one, (phiP->Z.e)[0]);
- fpcopy((const digit_t *) &Montgomery_one, (phiQ->Z.e)[0]);
- fpcopy((const digit_t *) &Montgomery_one, (phiR->Z.e)[0]);
-
- // Initialize constants: A24minus = A-2C, A24plus = A+2C, where A=6, C=1
- fpcopy((const digit_t *) &Montgomery_one, A24plus->e[0]);
- fp2add(A24plus, A24plus, A24plus);
- fp2add(A24plus, A24plus, A24minus);
- fp2add(A24plus, A24minus, A);
- fp2add(A24minus, A24minus, A24plus);
-
- // Retrieve kernel point
- LADDER3PT(XPB, XQB, XRB, PrivateKeyB, BOB, R, A);
-
- // Traverse tree
- index = 0;
- for (row = 1; row < MAX_Bob; row++) {
- while (index < MAX_Bob - row) {
- fp2copy(&R->X, &pts[npts]->X);
- fp2copy(&R->Z, &pts[npts]->Z);
- pts_index[npts++] = index;
- m = strat_Bob[ii++];
- xTPLe(R, R, A24minus, A24plus, (int) m);
- index += m;
- }
- get_3_isog(R, A24minus, A24plus, coeff);
-
- for (i = 0; i < npts; i++) {
- eval_3_isog(pts[i], coeff);
- }
- eval_3_isog(phiP, coeff);
- eval_3_isog(phiQ, coeff);
- eval_3_isog(phiR, coeff);
-
- fp2copy(&pts[npts - 1]->X, &R->X);
- fp2copy(&pts[npts - 1]->Z, &R->Z);
- index = pts_index[npts - 1];
- npts -= 1;
- }
-
- get_3_isog(R, A24minus, A24plus, coeff);
- eval_3_isog(phiP, coeff);
- eval_3_isog(phiQ, coeff);
- eval_3_isog(phiR, coeff);
-
- inv_3_way(&phiP->Z, &phiQ->Z, &phiR->Z);
- fp2mul_mont(&phiP->X, &phiP->Z, &phiP->X);
- fp2mul_mont(&phiQ->X, &phiQ->Z, &phiQ->X);
- fp2mul_mont(&phiR->X, &phiR->Z, &phiR->X);
-
- // Format public key
- fp2_encode(&phiP->X, PublicKeyB);
- fp2_encode(&phiQ->X, PublicKeyB + FP2_ENCODED_BYTES);
- fp2_encode(&phiR->X, PublicKeyB + 2 * FP2_ENCODED_BYTES);
-
- return 0;
-}
-
-int EphemeralSecretAgreement_A(const digit_t *PrivateKeyA, const unsigned char *PublicKeyB, unsigned char *SharedSecretA) { // Alice's ephemeral shared secret computation
- // It produces a shared secret key SharedSecretA using her secret key PrivateKeyA and Bob's public key PublicKeyB
- // Inputs: Alice's PrivateKeyA is an integer in the range [0, oA-1].
- // Bob's PublicKeyB consists of 3 elements in GF(p^2) encoded by removing leading 0 bytes.
- // Output: a shared secret SharedSecretA that consists of one element in GF(p^2) encoded by removing leading 0 bytes.
- point_proj_t R, pts[MAX_INT_POINTS_ALICE];
- f2elm_t coeff[3], PKB[3], _jinv;
- f2elm_t _A24plus = {0}, _C24 = {0}, _A = {0};
- f2elm_t *jinv=&_jinv, *A24plus=&_A24plus, *C24=&_C24, *A=&_A;
- unsigned int i, row, m, index = 0, pts_index[MAX_INT_POINTS_ALICE], npts = 0, ii = 0;
-
- // Initialize images of Bob's basis
- fp2_decode(PublicKeyB, &PKB[0]);
- fp2_decode(PublicKeyB + FP2_ENCODED_BYTES, &PKB[1]);
- fp2_decode(PublicKeyB + 2 * FP2_ENCODED_BYTES, &PKB[2]);
-
- // Initialize constants: A24plus = A+2C, C24 = 4C, where C=1
- get_A(&PKB[0], &PKB[1], &PKB[2], A);
- fpadd((const digit_t *) &Montgomery_one, (const digit_t *) &Montgomery_one, C24->e[0]);
- fp2add(A, C24, A24plus);
- fpadd(C24->e[0], C24->e[0], C24->e[0]);
-
- // Retrieve kernel point
- LADDER3PT(&PKB[0], &PKB[1], &PKB[2], PrivateKeyA, ALICE, R, A);
-
- // Traverse tree
- index = 0;
- for (row = 1; row < MAX_Alice; row++) {
- while (index < MAX_Alice - row) {
- fp2copy(&R->X, &pts[npts]->X);
- fp2copy(&R->Z, &pts[npts]->Z);
- pts_index[npts++] = index;
- m = strat_Alice[ii++];
- xDBLe(R, R, A24plus, C24, (int) (2 * m));
- index += m;
- }
- get_4_isog(R, A24plus, C24, coeff);
-
- for (i = 0; i < npts; i++) {
- eval_4_isog(pts[i], coeff);
- }
-
- fp2copy(&pts[npts - 1]->X, &R->X);
- fp2copy(&pts[npts - 1]->Z, &R->Z);
- index = pts_index[npts - 1];
- npts -= 1;
- }
-
- get_4_isog(R, A24plus, C24, coeff);
- fp2add(A24plus, A24plus, A24plus);
- fp2sub(A24plus, C24, A24plus);
- fp2add(A24plus, A24plus, A24plus);
- j_inv(A24plus, C24, jinv);
- fp2_encode(jinv, SharedSecretA); // Format shared secret
-
- return 0;
-}
-
-int EphemeralSecretAgreement_B(const digit_t *PrivateKeyB, const unsigned char *PublicKeyA, unsigned char *SharedSecretB) { // Bob's ephemeral shared secret computation
- // It produces a shared secret key SharedSecretB using his secret key PrivateKeyB and Alice's public key PublicKeyA
- // Inputs: Bob's PrivateKeyB is an integer in the range [0, 2^Floor(Log(2,oB)) - 1].
- // Alice's PublicKeyA consists of 3 elements in GF(p^2) encoded by removing leading 0 bytes.
- // Output: a shared secret SharedSecretB that consists of one element in GF(p^2) encoded by removing leading 0 bytes.
- point_proj_t R, pts[MAX_INT_POINTS_BOB];
- f2elm_t coeff[3], PKB[3], _jinv;
- f2elm_t _A24plus = {0}, _A24minus = {0}, _A = {0};
- f2elm_t *jinv=&_jinv, *A24plus=&_A24plus, *A24minus=&_A24minus, *A=&_A;
- unsigned int i, row, m, index = 0, pts_index[MAX_INT_POINTS_BOB], npts = 0, ii = 0;
-
- // Initialize images of Alice's basis
- fp2_decode(PublicKeyA, &PKB[0]);
- fp2_decode(PublicKeyA + FP2_ENCODED_BYTES, &PKB[1]);
- fp2_decode(PublicKeyA + 2 * FP2_ENCODED_BYTES, &PKB[2]);
-
- // Initialize constants: A24plus = A+2C, A24minus = A-2C, where C=1
- get_A(&PKB[0], &PKB[1], &PKB[2], A);
- fpadd((const digit_t *) &Montgomery_one, (const digit_t *) &Montgomery_one, A24minus->e[0]);
- fp2add(A, A24minus, A24plus);
- fp2sub(A, A24minus, A24minus);
-
- // Retrieve kernel point
- LADDER3PT(&PKB[0], &PKB[1], &PKB[2], PrivateKeyB, BOB, R, A);
-
- // Traverse tree
- index = 0;
- for (row = 1; row < MAX_Bob; row++) {
- while (index < MAX_Bob - row) {
- fp2copy(&R->X, &pts[npts]->X);
- fp2copy(&R->Z, &pts[npts]->Z);
- pts_index[npts++] = index;
- m = strat_Bob[ii++];
- xTPLe(R, R, A24minus, A24plus, (int) m);
- index += m;
- }
- get_3_isog(R, A24minus, A24plus, coeff);
-
- for (i = 0; i < npts; i++) {
- eval_3_isog(pts[i], coeff);
- }
-
- fp2copy(&pts[npts - 1]->X, &R->X);
- fp2copy(&pts[npts - 1]->Z, &R->Z);
- index = pts_index[npts - 1];
- npts -= 1;
- }
-
- get_3_isog(R, A24minus, A24plus, coeff);
- fp2add(A24plus, A24minus, A);
- fp2add(A, A, A);
- fp2sub(A24plus, A24minus, A24plus);
- j_inv(A, A24plus, jinv);
- fp2_encode(jinv, SharedSecretB); // Format shared secret
-
- return 0;
-}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sike_r2_kem.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sike_r2_kem.c
deleted file mode 100644
index 7768ad3650..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sike_r2_kem.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/********************************************************************************************
-* SIDH: an efficient supersingular isogeny cryptography library
-*
-* Abstract: supersingular isogeny key encapsulation (SIKE) protocol
-*********************************************************************************************/
-
-#include <string.h>
-#include "../s2n_pq_random.h"
-#include "fips202.h"
-#include "utils/s2n_safety.h"
-#include "tls/s2n_kem.h"
-#include "pq-crypto/s2n_pq.h"
-
-int SIKE_P434_r2_crypto_kem_keypair(unsigned char *pk, unsigned char *sk) {
- // SIKE's key generation
- // Outputs: secret key sk (CRYPTO_SECRETKEYBYTES = MSG_BYTES + SECRETKEY_B_BYTES + CRYPTO_PUBLICKEYBYTES bytes)
- // public key pk (CRYPTO_PUBLICKEYBYTES bytes)
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
-
- digit_t _sk[(SECRETKEY_B_BYTES / sizeof(digit_t)) + 1];
-
- // Generate lower portion of secret key sk <- s||SK
- GUARD_AS_POSIX(s2n_get_random_bytes(sk, MSG_BYTES));
- GUARD(random_mod_order_B((unsigned char *)_sk));
-
- // Generate public key pk
- EphemeralKeyGeneration_B(_sk, pk);
-
- memcpy(sk + MSG_BYTES, _sk, SECRETKEY_B_BYTES);
-
- // Append public key pk to secret key sk
- memcpy(&sk[MSG_BYTES + SECRETKEY_B_BYTES], pk, CRYPTO_PUBLICKEYBYTES);
-
- return 0;
-}
-
-int SIKE_P434_r2_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsigned char *pk) {
- // SIKE's encapsulation
- // Input: public key pk (CRYPTO_PUBLICKEYBYTES bytes)
- // Outputs: shared secret ss (CRYPTO_BYTES bytes)
- // ciphertext message ct (CRYPTO_CIPHERTEXTBYTES = CRYPTO_PUBLICKEYBYTES + MSG_BYTES bytes)
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
-
- union {
- unsigned char b[SECRETKEY_A_BYTES];
- digit_t d[SECRETKEY_A_BYTES/sizeof(digit_t)];
- } ephemeralsk;
- unsigned char jinvariant[FP2_ENCODED_BYTES];
- unsigned char h[MSG_BYTES];
- unsigned char temp[CRYPTO_CIPHERTEXTBYTES + MSG_BYTES];
-
- // Generate ephemeralsk <- G(m||pk) mod oA
- GUARD_AS_POSIX(s2n_get_random_bytes(temp, MSG_BYTES));
- memcpy(&temp[MSG_BYTES], pk, CRYPTO_PUBLICKEYBYTES);
- shake256(ephemeralsk.b, SECRETKEY_A_BYTES, temp, CRYPTO_PUBLICKEYBYTES + MSG_BYTES);
-
- /* ephemeralsk is a union; the memory set here through .b will get accessed through the .d member later */
- /* cppcheck-suppress unreadVariable */
- /* cppcheck-suppress unmatchedSuppression */
- ephemeralsk.b[SECRETKEY_A_BYTES - 1] &= MASK_ALICE;
-
- // Encrypt
- EphemeralKeyGeneration_A(ephemeralsk.d, ct);
- EphemeralSecretAgreement_A(ephemeralsk.d, pk, jinvariant);
- shake256(h, MSG_BYTES, jinvariant, FP2_ENCODED_BYTES);
- for (int i = 0; i < MSG_BYTES; i++) {
- ct[i + CRYPTO_PUBLICKEYBYTES] = temp[i] ^ h[i];
- }
- // Generate shared secret ss <- H(m||ct)
- memcpy(&temp[MSG_BYTES], ct, CRYPTO_CIPHERTEXTBYTES);
- shake256(ss, CRYPTO_BYTES, temp, CRYPTO_CIPHERTEXTBYTES + MSG_BYTES);
-
- return 0;
-}
-
-int SIKE_P434_r2_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, const unsigned char *sk) {
- // SIKE's decapsulation
- // Input: secret key sk (CRYPTO_SECRETKEYBYTES = MSG_BYTES + SECRETKEY_B_BYTES + CRYPTO_PUBLICKEYBYTES bytes)
- // ciphertext message ct (CRYPTO_CIPHERTEXTBYTES = CRYPTO_PUBLICKEYBYTES + MSG_BYTES bytes)
- // Outputs: shared secret ss (CRYPTO_BYTES bytes)
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
-
- union {
- unsigned char b[SECRETKEY_A_BYTES];
- digit_t d[SECRETKEY_A_BYTES/sizeof(digit_t)];
- } ephemeralsk_;
- unsigned char jinvariant_[FP2_ENCODED_BYTES];
- unsigned char h_[MSG_BYTES];
- unsigned char c0_[CRYPTO_PUBLICKEYBYTES];
- unsigned char temp[CRYPTO_CIPHERTEXTBYTES + MSG_BYTES];
-
- digit_t _sk[(SECRETKEY_B_BYTES / sizeof(digit_t)) + 1];
- memcpy(_sk, sk + MSG_BYTES, SECRETKEY_B_BYTES);
-
- // Decrypt
- EphemeralSecretAgreement_B(_sk, ct, jinvariant_);
- shake256(h_, MSG_BYTES, jinvariant_, FP2_ENCODED_BYTES);
- for (int i = 0; i < MSG_BYTES; i++) {
- temp[i] = ct[i + CRYPTO_PUBLICKEYBYTES] ^ h_[i];
- }
- // Generate ephemeralsk_ <- G(m||pk) mod oA
- memcpy(&temp[MSG_BYTES], &sk[MSG_BYTES + SECRETKEY_B_BYTES], CRYPTO_PUBLICKEYBYTES);
- shake256(ephemeralsk_.b, SECRETKEY_A_BYTES, temp, CRYPTO_PUBLICKEYBYTES + MSG_BYTES);
-
- /* ephemeralsk_ is a union; the memory set here through .b will get accessed through the .d member later */
- /* cppcheck-suppress unreadVariable */
- /* cppcheck-suppress uninitvar */
- /* cppcheck-suppress unmatchedSuppression */
- ephemeralsk_.b[SECRETKEY_A_BYTES - 1] &= MASK_ALICE;
-
- // Generate shared secret ss <- H(m||ct) or output ss <- H(s||ct)
- EphemeralKeyGeneration_A(ephemeralsk_.d, c0_);
- if (memcmp(c0_, ct, CRYPTO_PUBLICKEYBYTES) != 0) {
- memcpy(temp, sk, MSG_BYTES);
- }
- memcpy(&temp[MSG_BYTES], ct, CRYPTO_CIPHERTEXTBYTES);
- shake256(ss, CRYPTO_BYTES, temp, CRYPTO_CIPHERTEXTBYTES + MSG_BYTES);
-
- return 0;
-}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sikep434r2_fp_x64_asm.S b/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sikep434r2_fp_x64_asm.S
deleted file mode 100644
index 831fc1b7fb..0000000000
--- a/contrib/restricted/aws/s2n/pq-crypto/sike_r2/sikep434r2_fp_x64_asm.S
+++ /dev/null
@@ -1,962 +0,0 @@
-//*******************************************************************************************
-// SIDH: an efficient supersingular isogeny cryptography library
-//
-// Abstract: field arithmetic in x64 assembly for P434 on Linux
-//*******************************************************************************************
-
-.intel_syntax noprefix
-
-/* Requires bmi2 instruction set for mulx. adx instructions are optional, but preferred. */
-
-// Registers that are used for parameter passing:
-#define reg_p1 rdi
-#define reg_p2 rsi
-#define reg_p3 rdx
-
-// Define addition instructions
-#ifdef S2N_ADX
-
-#define ADD1 adox
-#define ADC1 adox
-#define ADD2 adcx
-#define ADC2 adcx
-
-#else // S2N_ADX
-
-#define ADD1 add
-#define ADC1 adc
-#define ADD2 add
-#define ADC2 adc
-
-#endif // S2N_ADX
-
-// The constants below (asm_p434, asm_p434p1, and asm_p434x2) are duplicated from
-// P434.c, and correspond to the arrays p434, p434p1, and p434x2. The values are
-// identical; they are just represented here as standard (base 10) ints, instead
-// of hex. If, for any reason, the constants are changed in one file, they should be
-// updated in the other file as well.
-
-.text
-.align 32
-.type asm_p434, @object
-.size asm_p434, 56
-asm_p434:
- .quad -1
- .quad -1
- .quad -1
- .quad -161717841442111489
- .quad 8918917783347572387
- .quad 7853257225132122198
- .quad 620258357900100
-.align 32
-.type asm_p434p1, @object
-.size asm_p434p1, 56
-asm_p434p1:
- .quad 0
- .quad 0
- .quad 0
- .quad -161717841442111488
- .quad 8918917783347572387
- .quad 7853257225132122198
- .quad 620258357900100
-.align 32
-.type asm_p434x2, @object
-.size asm_p434x2, 56
-asm_p434x2:
- .quad -2
- .quad -1
- .quad -1
- .quad -323435682884222977
- .quad -608908507014406841
- .quad -2740229623445307220
- .quad 1240516715800200
-
-//***********************************************************************
-// Field addition
-// Operation: c [reg_p3] = a [reg_p1] + b [reg_p2]
-//***********************************************************************
-.global fpadd434_asm
-fpadd434_asm:
- push r12
- push r13
- push r14
- push r15
- push rbx
- push rbp
-
- xor rax, rax
- mov r8, [reg_p1]
- mov r9, [reg_p1+8]
- mov r10, [reg_p1+16]
- mov r11, [reg_p1+24]
- mov r12, [reg_p1+32]
- mov r13, [reg_p1+40]
- mov r14, [reg_p1+48]
- add r8, [reg_p2]
- adc r9, [reg_p2+8]
- adc r10, [reg_p2+16]
- adc r11, [reg_p2+24]
- adc r12, [reg_p2+32]
- adc r13, [reg_p2+40]
- adc r14, [reg_p2+48]
-
- mov rbx, [rip+asm_p434x2]
- sub r8, rbx
- mov rcx, [rip+asm_p434x2+8]
- sbb r9, rcx
- sbb r10, rcx
- mov rdi, [rip+asm_p434x2+24]
- sbb r11, rdi
- mov rsi, [rip+asm_p434x2+32]
- sbb r12, rsi
- mov rbp, [rip+asm_p434x2+40]
- sbb r13, rbp
- mov r15, [rip+asm_p434x2+48]
- sbb r14, r15
- sbb rax, 0
-
- and rbx, rax
- and rcx, rax
- and rdi, rax
- and rsi, rax
- and rbp, rax
- and r15, rax
-
- add r8, rbx
- adc r9, rcx
- adc r10, rcx
- adc r11, rdi
- adc r12, rsi
- adc r13, rbp
- adc r14, r15
- mov [reg_p3], r8
- mov [reg_p3+8], r9
- mov [reg_p3+16], r10
- mov [reg_p3+24], r11
- mov [reg_p3+32], r12
- mov [reg_p3+40], r13
- mov [reg_p3+48], r14
-
- pop rbp
- pop rbx
- pop r15
- pop r14
- pop r13
- pop r12
- ret
-
-//***********************************************************************
-// Field subtraction
-// Operation: c [reg_p3] = a [reg_p1] - b [reg_p2]
-//***********************************************************************
-.global fpsub434_asm
-fpsub434_asm:
- push r12
- push r13
- push r14
-
- xor rax, rax
- mov r8, [reg_p1]
- mov r9, [reg_p1+8]
- mov r10, [reg_p1+16]
- mov r11, [reg_p1+24]
- mov r12, [reg_p1+32]
- mov r13, [reg_p1+40]
- mov r14, [reg_p1+48]
- sub r8, [reg_p2]
- sbb r9, [reg_p2+8]
- sbb r10, [reg_p2+16]
- sbb r11, [reg_p2+24]
- sbb r12, [reg_p2+32]
- sbb r13, [reg_p2+40]
- sbb r14, [reg_p2+48]
- sbb rax, 0
-
- mov rcx, [rip+asm_p434x2]
- mov rdi, [rip+asm_p434x2+8]
- mov rsi, [rip+asm_p434x2+24]
- and rcx, rax
- and rdi, rax
- and rsi, rax
- add r8, rcx
- adc r9, rdi
- adc r10, rdi
- adc r11, rsi
- mov [reg_p3], r8
- mov [reg_p3+8], r9
- mov [reg_p3+16], r10
- mov [reg_p3+24], r11
- setc cl
-
- mov r8, [rip+asm_p434x2+32]
- mov rdi, [rip+asm_p434x2+40]
- mov rsi, [rip+asm_p434x2+48]
- and r8, rax
- and rdi, rax
- and rsi, rax
- bt rcx, 0
- adc r12, r8
- adc r13, rdi
- adc r14, rsi
- mov [reg_p3+32], r12
- mov [reg_p3+40], r13
- mov [reg_p3+48], r14
-
- pop r14
- pop r13
- pop r12
- ret
-
-///////////////////////////////////////////////////////////////// MACRO
-// Schoolbook integer multiplication, a full row at a time
-// Inputs: memory pointers M0 and M1
-// Outputs: memory pointer C
-// Temps: regs T0:T9
-/////////////////////////////////////////////////////////////////
-
-#ifdef S2N_ADX
-.macro MUL192_SCHOOL M0, M1, C, T0, T1, T2, T3, T4, T5, T6
- mov rdx, \M0
- mulx \T0, \T1, \M1 // T0:T1 = A0*B0
- mov \C, \T1 // C0_final
- mulx \T1, \T2, 8\M1 // T1:T2 = A0*B1
- xor rax, rax
- adox \T0, \T2
- mulx \T2, \T3, 16\M1 // T2:T3 = A0*B2
- adox \T1, \T3
-
- mov rdx, 8\M0
- mulx \T3, \T4, \M1 // T3:T4 = A1*B0
- adox \T2, rax
- xor rax, rax
- mulx \T5, \T6, 8\M1 // T5:T6 = A1*B1
- adox \T4, \T0
- mov 8\C, \T4 // C1_final
- adcx \T3, \T6
- mulx \T6, \T0, 16\M1 // T6:T0 = A1*B2
- adox \T3, \T1
- adcx \T5, \T0
- adcx \T6, rax
- adox \T5, \T2
-
- mov rdx, 16\M0
- mulx \T1, \T0, \M1 // T1:T0 = A2*B0
- adox \T6, rax
- xor rax, rax
- mulx \T4, \T2, 8\M1 // T4:T2 = A2*B1
- adox \T0, \T3
- mov 16\C, \T0 // C2_final
- adcx \T1, \T5
- mulx \T0, \T3, 16\M1 // T0:T3 = A2*B2
- adcx \T4, \T6
- adcx \T0, rax
- adox \T1, \T2
- adox \T3, \T4
- adox \T0, rax
- mov 24\C, \T1 // C3_final
- mov 32\C, \T3 // C4_final
- mov 40\C, \T0 // C5_final
-.endm
-
-.macro MUL256_SCHOOL M0, M1, C, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
- mov rdx, \M0
- mulx \T0, \T1, \M1 // T0:T1 = A0*B0
- mov \C, \T1 // C0_final
- mulx \T1, \T2, 8\M1 // T1:T2 = A0*B1
- xor rax, rax
- adox \T0, \T2
- mulx \T2, \T3, 16\M1 // T2:T3 = A0*B2
- adox \T1, \T3
- mulx \T3, \T4, 24\M1 // T3:T4 = A0*B3
- adox \T2, \T4
-
- mov rdx, 8\M0
- mulx \T5, \T4, \M1 // T5:T4 = A1*B0
- adox \T3, rax
- xor rax, rax
- mulx \T6, \T7, 8\M1 // T6:T7 = A1*B1
- adox \T4, \T0
- mov 8\C, \T4 // C1_final
- adcx \T5, \T7
- mulx \T7, \T8, 16\M1 // T7:T8 = A1*B2
- adcx \T6, \T8
- adox \T5, \T1
- mulx \T8, \T9, 24\M1 // T8:T9 = A1*B3
- adcx \T7, \T9
- adcx \T8, rax
- adox \T6, \T2
-
- mov rdx, 16\M0
- mulx \T1, \T0, \M1 // T1:T0 = A2*B0
- adox \T7, \T3
- adox \T8, rax
- xor rax, rax
- mulx \T2, \T3, 8\M1 // T2:T3 = A2*B1
- adox \T0, \T5
- mov 16\C, \T0 // C2_final
- adcx \T1, \T3
- mulx \T3, \T4, 16\M1 // T3:T4 = A2*B2
- adcx \T2, \T4
- adox \T1, \T6
- mulx \T4, \T9, 24\M1 // T4:T9 = A2*B3
- adcx \T3, \T9
- mov rdx, 24\M0
- adcx \T4, rax
-
- adox \T2, \T7
- adox \T3, \T8
- adox \T4, rax
-
- mulx \T5, \T0, \M1 // T5:T0 = A3*B0
- xor rax, rax
- mulx \T6, \T7, 8\M1 // T6:T7 = A3*B1
- adcx \T5, \T7
- adox \T1, \T0
- mulx \T7, \T8, 16\M1 // T7:T8 = A3*B2
- adcx \T6, \T8
- adox \T2, \T5
- mulx \T8, \T9, 24\M1 // T8:T9 = A3*B3
- adcx \T7, \T9
- adcx \T8, rax
-
- adox \T3, \T6
- adox \T4, \T7
- adox \T8, rax
- mov 24\C, \T1 // C3_final
- mov 32\C, \T2 // C4_final
- mov 40\C, \T3 // C5_final
- mov 48\C, \T4 // C6_final
- mov 56\C, \T8 // C7_final
-.endm
-
-#else // S2N_ADX
-
-.macro MUL192_SCHOOL M0, M1, C, T0, T1, T2, T3, T4, T5, T6
- mov rdx, \M0
- mulx \T0, \T1, \M1 // T0:T1 = A0*B0
- mov \C, \T1 // C0_final
- mulx \T1, \T2, 8\M1 // T1:T2 = A0*B1
- add \T0, \T2
- mulx \T2, \T3, 16\M1 // T2:T3 = A0*B2
- adc \T1, \T3
-
- mov rdx, 8\M0
- mulx \T3, \T4, \M1 // T3:T4 = A1*B0
- adc \T2, 0
- mulx \T5, \T6, 8\M1 // T5:T6 = A1*B1
- add \T4, \T0
- mov 8\C, \T4 // C1_final
- adc \T3, \T1
- adc \T5, \T2
- mulx \T0, \T1, 16\M1 // T0:T1 = A1*B2
- adc \T0, 0
-
- add \T3, \T6
- adc \T5, \T1
- adc \T0, 0
-
- mov rdx, 16\M0
- mulx \T1, \T2, \M1 // T1:T2 = A2*B0
- add \T2, \T3
- mov 16\C, \T2 // C2_final
- mulx \T4, \T6, 8\M1 // T4:T6 = A2*B1
- adc \T1, \T5
- adc \T0, \T4
- mulx \T2, \T3, 16\M1 // T2:T3 = A2*B2
- adc \T2, 0
- add \T1, \T6
- adc \T0, \T3
- adc \T2, 0
- mov 24\C, \T1 // C3_final
- mov 32\C, \T0 // C4_final
- mov 40\C, \T2 // C5_final
-.endm
-
-.macro MUL256_SCHOOL M0, M1, C, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
- mov rdx, \M0
- mulx \T0, \T1, \M1 // T0:T1 = A0*B0
- mov \C, \T1 // C0_final
- mulx \T1, \T2, 8\M1 // T1:T2 = A0*B1
- add \T0, \T2
- mulx \T2, \T3, 16\M1 // T2:T3 = A0*B2
- adc \T1, \T3
- mulx \T3, \T4, 24\M1 // T3:T4 = A0*B3
- adc \T2, \T4
- mov rdx, 8\M0
- adc \T3, 0
-
- mulx \T5, \T4, \M1 // T5:T4 = A1*B0
- mulx \T6, \T7, 8\M1 // T6:T7 = A1*B1
- add \T5, \T7
- mulx \T7, \T8, 16\M1 // T7:T8 = A1*B2
- adc \T6, \T8
- mulx \T8, \T9, 24\M1 // T8:T9 = A1*B3
- adc \T7, \T9
- adc \T8, 0
-
- add \T4, \T0
- mov 8\C, \T4 // C1_final
- adc \T5, \T1
- adc \T6, \T2
- adc \T7, \T3
- mov rdx, 16\M0
- adc \T8, 0
-
- mulx \T1, \T0, \M1 // T1:T0 = A2*B0
- mulx \T2, \T3, 8\M1 // T2:T3 = A2*B1
- add \T1, \T3
- mulx \T3, \T4, 16\M1 // T3:T4 = A2*B2
- adc \T2, \T4
- mulx \T4, \T9, 24\M1 // T4:T9 = A2*B3
- adc \T3, \T9
- mov rdx, 24\M0
- adc \T4, 0
-
- add \T0, \T5
- mov 16\C, \T0 // C2_final
- adc \T1, \T6
- adc \T2, \T7
- adc \T3, \T8
- adc \T4, 0
-
- mulx \T5, \T0, \M1 // T5:T0 = A3*B0
- mulx \T6, \T7, 8\M1 // T6:T7 = A3*B1
- add \T5, \T7
- mulx \T7, \T8, 16\M1 // T7:T8 = A3*B2
- adc \T6, \T8
- mulx \T8, \T9, 24\M1 // T8:T9 = A3*B3
- adc \T7, \T9
- adc \T8, 0
-
- add \T1, \T0
- mov 24\C, \T1 // C3_final
- adc \T2, \T5
- mov 32\C, \T2 // C4_final
- adc \T3, \T6
- mov 40\C, \T3 // C5_final
- adc \T4, \T7
- mov 48\C, \T4 // C6_final
- adc \T8, 0
- mov 56\C, \T8 // C7_final
-.endm
-#endif // S2N_ADX
-
-//*****************************************************************************
-// 434-bit multiplication using Karatsuba (one level), schoolbook (one level)
-//*****************************************************************************
-.global mul434_asm
-mul434_asm:
- push r12
- push r13
- push r14
- push r15
- mov rcx, reg_p3
-
- // r8-r11 <- AH + AL, rax <- mask
- xor rax, rax
- mov r8, [reg_p1]
- mov r9, [reg_p1+8]
- mov r10, [reg_p1+16]
- mov r11, [reg_p1+24]
- push rbx
- push rbp
- sub rsp, 96
- add r8, [reg_p1+32]
- adc r9, [reg_p1+40]
- adc r10, [reg_p1+48]
- adc r11, 0
- sbb rax, 0
- mov [rsp], r8
- mov [rsp+8], r9
- mov [rsp+16], r10
- mov [rsp+24], r11
-
- // r12-r15 <- BH + BL, rbx <- mask
- xor rbx, rbx
- mov r12, [reg_p2]
- mov r13, [reg_p2+8]
- mov r14, [reg_p2+16]
- mov r15, [reg_p2+24]
- add r12, [reg_p2+32]
- adc r13, [reg_p2+40]
- adc r14, [reg_p2+48]
- adc r15, 0
- sbb rbx, 0
- mov [rsp+32], r12
- mov [rsp+40], r13
- mov [rsp+48], r14
- mov [rsp+56], r15
-
- // r12-r15 <- masked (BH + BL)
- and r12, rax
- and r13, rax
- and r14, rax
- and r15, rax
-
- // r8-r11 <- masked (AH + AL)
- and r8, rbx
- and r9, rbx
- and r10, rbx
- and r11, rbx
-
- // r8-r11 <- masked (AH + AL) + masked (BH + BL)
- add r8, r12
- adc r9, r13
- adc r10, r14
- adc r11, r15
- mov [rsp+64], r8
- mov [rsp+72], r9
- mov [rsp+80], r10
- mov [rsp+88], r11
-
- // [rsp] <- (AH+AL) x (BH+BL), low part
- MUL256_SCHOOL [rsp], [rsp+32], [rsp], r8, r9, r10, r11, r12, r13, r14, r15, rbx, rbp
-
- // [rcx] <- AL x BL
- MUL256_SCHOOL [reg_p1], [reg_p2], [rcx], r8, r9, r10, r11, r12, r13, r14, r15, rbx, rbp // Result C0-C3
-
- // [rcx+64] <- AH x BH
- MUL192_SCHOOL [reg_p1+32], [reg_p2+32], [rcx+64], r8, r9, r10, r11, r12, r13, r14
-
- // r8-r11 <- (AH+AL) x (BH+BL), final step
- mov r8, [rsp+64]
- mov r9, [rsp+72]
- mov r10, [rsp+80]
- mov r11, [rsp+88]
- mov rax, [rsp+32]
- add r8, rax
- mov rax, [rsp+40]
- adc r9, rax
- mov rax, [rsp+48]
- adc r10, rax
- mov rax, [rsp+56]
- adc r11, rax
-
- // [rsp], x3-x5 <- (AH+AL) x (BH+BL) - ALxBL
- mov r12, [rsp]
- mov r13, [rsp+8]
- mov r14, [rsp+16]
- mov r15, [rsp+24]
- sub r12, [rcx]
- sbb r13, [rcx+8]
- sbb r14, [rcx+16]
- sbb r15, [rcx+24]
- sbb r8, [rcx+32]
- sbb r9, [rcx+40]
- sbb r10, [rcx+48]
- sbb r11, [rcx+56]
-
- // r8-r15 <- (AH+AL) x (BH+BL) - ALxBL - AHxBH
- sub r12, [rcx+64]
- sbb r13, [rcx+72]
- sbb r14, [rcx+80]
- sbb r15, [rcx+88]
- sbb r8, [rcx+96]
- sbb r9, [rcx+104]
- sbb r10, 0
- sbb r11, 0
-
- add r12, [rcx+32]
- mov [rcx+32], r12 // Result C4-C7
- adc r13, [rcx+40]
- mov [rcx+40], r13
- adc r14, [rcx+48]
- mov [rcx+48], r14
- adc r15, [rcx+56]
- mov [rcx+56], r15
- adc r8, [rcx+64]
- mov [rcx+64], r8 // Result C8-C15
- adc r9, [rcx+72]
- mov [rcx+72], r9
- adc r10, [rcx+80]
- mov [rcx+80], r10
- adc r11, [rcx+88]
- mov [rcx+88], r11
- mov r12, [rcx+96]
- adc r12, 0
- mov [rcx+96], r12
- mov r13, [rcx+104]
- adc r13, 0
- mov [rcx+104], r13
-
- add rsp, 96
- pop rbp
- pop rbx
- pop r15
- pop r14
- pop r13
- pop r12
- ret
-
-///////////////////////////////////////////////////////////////// MACRO
-// Schoolbook integer multiplication
-// Inputs: memory pointers M0 and M1
-// Outputs: regs T0:T5
-// Temps: regs T7:T6
-/////////////////////////////////////////////////////////////////
-.macro MUL64x256_SCHOOL M0, M1, T0, T1, T2, T3, T4, T5
- mov rdx, \M0
- mulx \T1, \T0, \M1 // T0 <- C0_final
- mulx \T2, \T4, 8\M1
- xor rax, rax
- mulx \T3, \T5, 16\M1
- ADD1 \T1, \T4 // T1 <- C1_final
- ADC1 \T2, \T5 // T2 <- C2_final
- mulx \T4, \T5, 24\M1
- ADC1 \T3, \T5 // T3 <- C3_final
- ADC1 \T4, rax // T4 <- C4_final
-.endm
-
-#ifdef S2N_ADX
-.macro MUL128x256_SCHOOL M0, M1, T0, T1, T2, T3, T4, T5, T6
- mov rdx, \M0
- mulx \T1, \T0, \M1 // T0 <- C0_final
- mulx \T2, \T4, 8\M1
- xor rax, rax
- mulx \T3, \T5, 16\M1
- ADD1 \T1, \T4
- ADC1 \T2, \T5
- mulx \T4, \T5, 24\M1
- ADC1 \T3, \T5
- ADC1 \T4, rax
-
- xor rax, rax
- mov rdx, 8\M0
- mulx \T6, \T5, \M1
- ADD2 \T1, \T5 // T1 <- C1_final
- ADC2 \T2, \T6
- mulx \T5, \T6, 8\M1
- ADC2 \T3, \T5
- ADD1 \T2, \T6
- mulx \T5, \T6, 16\M1
- ADC2 \T4, \T5
- ADC1 \T3, \T6
- mulx \T5, \T6, 24\M1
- ADC2 \T5, rax
- ADC1 \T4, \T6
- ADC1 \T5, rax
-.endm
-
-#else // S2N_ADX
-
-.macro MUL128x256_SCHOOL M0, M1, T0, T1, T2, T3, T4, T5, T6
- mov rdx, \M0
- mulx \T1, \T0, \M1 // T0 <- C0_final
- mulx \T2, \T4, 8\M1
- mulx \T3, \T5, 16\M1
- add \T1, \T4
- adc \T2, \T5
- mulx \T4, \T5, 24\M1
- adc \T3, \T5
- adc \T4, 0
-
- mov rdx, 8\M0
- mulx \T6, \T5, \M1
- add \T1, \T5 // T1 <- C1_final
- adc \T2, \T6
- mulx \T5, \T6, 8\M1
- adc \T3, \T5
- mulx \T5, rax, 16\M1
- adc \T4, \T5
- mulx \T5, rdx, 24\M1
- adc \T5, 0
- add \T2, \T6
- adc \T3, rax
- adc \T4, rdx
- adc \T5, 0
-.endm
-#endif // S2N_ADX
-
-//**************************************************************************************
-// Montgomery reduction
-// Based on method described in Faz-Hernandez et al. https://eprint.iacr.org/2017/1015
-// Operation: c [reg_p2] = a [reg_p1]
-// NOTE: a=c is not allowed
-//**************************************************************************************
-.global rdc434_asm
-rdc434_asm:
- push r12
- push r13
-
- // a[0-1] x p434p1_nz --> result: r8:r13
- MUL128x256_SCHOOL [reg_p1], [rip+asm_p434p1+24], r8, r9, r10, r11, r12, r13, rcx
-
- xor rcx, rcx
- add r8, [reg_p1+24]
- adc r9, [reg_p1+32]
- adc r10, [reg_p1+40]
- adc r11, [reg_p1+48]
- adc r12, [reg_p1+56]
- adc r13, [reg_p1+64]
- adc rcx, [reg_p1+72]
- mov [reg_p1+24], r8
- mov [reg_p1+32], r9
- mov [reg_p1+40], r10
- mov [reg_p1+48], r11
- mov [reg_p1+56], r12
- mov [reg_p1+64], r13
- mov [reg_p1+72], rcx
- mov r8, [reg_p1+80]
- mov r9, [reg_p1+88]
- mov r10, [reg_p1+96]
- mov r11, [reg_p1+104]
- adc r8, 0
- adc r9, 0
- adc r10, 0
- adc r11, 0
- mov [reg_p1+80], r8
- mov [reg_p1+88], r9
- mov [reg_p1+96], r10
- mov [reg_p1+104], r11
-
- // a[2-3] x p434p1_nz --> result: r8:r13
- MUL128x256_SCHOOL [reg_p1+16], [rip+asm_p434p1+24], r8, r9, r10, r11, r12, r13, rcx
-
- xor rcx, rcx
- add r8, [reg_p1+40]
- adc r9, [reg_p1+48]
- adc r10, [reg_p1+56]
- adc r11, [reg_p1+64]
- adc r12, [reg_p1+72]
- adc r13, [reg_p1+80]
- adc rcx, [reg_p1+88]
- mov [reg_p1+40], r8
- mov [reg_p1+48], r9
- mov [reg_p1+56], r10
- mov [reg_p1+64], r11
- mov [reg_p1+72], r12
- mov [reg_p1+80], r13
- mov [reg_p1+88], rcx
- mov r8, [reg_p1+96]
- mov r9, [reg_p1+104]
- adc r8, 0
- adc r9, 0
- mov [reg_p1+96], r8
- mov [reg_p1+104], r9
-
- // a[4-5] x p434p1_nz --> result: r8:r13
- MUL128x256_SCHOOL [reg_p1+32], [rip+asm_p434p1+24], r8, r9, r10, r11, r12, r13, rcx
-
- xor rcx, rcx
- add r8, [reg_p1+56]
- adc r9, [reg_p1+64]
- adc r10, [reg_p1+72]
- adc r11, [reg_p1+80]
- adc r12, [reg_p1+88]
- adc r13, [reg_p1+96]
- adc rcx, [reg_p1+104]
- mov [reg_p2], r8 // Final result c0-c1
- mov [reg_p2+8], r9
- mov [reg_p1+72], r10
- mov [reg_p1+80], r11
- mov [reg_p1+88], r12
- mov [reg_p1+96], r13
- mov [reg_p1+104], rcx
-
- // a[6-7] x p434p1_nz --> result: r8:r12
- MUL64x256_SCHOOL [reg_p1+48], [rip+asm_p434p1+24], r8, r9, r10, r11, r12, r13
-
- // Final result c2:c6
- add r8, [reg_p1+72]
- adc r9, [reg_p1+80]
- adc r10, [reg_p1+88]
- adc r11, [reg_p1+96]
- adc r12, [reg_p1+104]
- mov [reg_p2+16], r8
- mov [reg_p2+24], r9
- mov [reg_p2+32], r10
- mov [reg_p2+40], r11
- mov [reg_p2+48], r12
-
- pop r13
- pop r12
- ret
-
-//***********************************************************************
-// 434-bit multiprecision addition
-// Operation: c [reg_p3] = a [reg_p1] + b [reg_p2]
-//***********************************************************************
-.global mp_add434_asm
-mp_add434_asm:
- mov r8, [reg_p1]
- mov r9, [reg_p1+8]
- mov r10, [reg_p1+16]
- mov r11, [reg_p1+24]
- add r8, [reg_p2]
- adc r9, [reg_p2+8]
- adc r10, [reg_p2+16]
- adc r11, [reg_p2+24]
- mov [reg_p3], r8
- mov [reg_p3+8], r9
- mov [reg_p3+16], r10
- mov [reg_p3+24], r11
-
- mov r8, [reg_p1+32]
- mov r9, [reg_p1+40]
- mov r10, [reg_p1+48]
- adc r8, [reg_p2+32]
- adc r9, [reg_p2+40]
- adc r10, [reg_p2+48]
- mov [reg_p3+32], r8
- mov [reg_p3+40], r9
- mov [reg_p3+48], r10
- ret
-
-//***********************************************************************
-// 2x434-bit multiprecision subtraction/addition
-// Operation: c [reg_p3] = a [reg_p1] - b [reg_p2]. If c < 0, add p434*2^448
-//***********************************************************************
-.global mp_subadd434x2_asm
-mp_subadd434x2_asm:
- push r12
- push r13
- push r14
- push r15
- xor rax, rax
- mov r8, [reg_p1]
- mov r9, [reg_p1+8]
- mov r10, [reg_p1+16]
- mov r11, [reg_p1+24]
- mov r12, [reg_p1+32]
- sub r8, [reg_p2]
- sbb r9, [reg_p2+8]
- sbb r10, [reg_p2+16]
- sbb r11, [reg_p2+24]
- sbb r12, [reg_p2+32]
- mov [reg_p3], r8
- mov [reg_p3+8], r9
- mov [reg_p3+16], r10
- mov [reg_p3+24], r11
- mov [reg_p3+32], r12
-
- mov r8, [reg_p1+40]
- mov r9, [reg_p1+48]
- mov r10, [reg_p1+56]
- mov r11, [reg_p1+64]
- mov r12, [reg_p1+72]
- sbb r8, [reg_p2+40]
- sbb r9, [reg_p2+48]
- sbb r10, [reg_p2+56]
- sbb r11, [reg_p2+64]
- sbb r12, [reg_p2+72]
- mov [reg_p3+40], r8
- mov [reg_p3+48], r9
- mov [reg_p3+56], r10
-
- mov r13, [reg_p1+80]
- mov r14, [reg_p1+88]
- mov r15, [reg_p1+96]
- mov rcx, [reg_p1+104]
- sbb r13, [reg_p2+80]
- sbb r14, [reg_p2+88]
- sbb r15, [reg_p2+96]
- sbb rcx, [reg_p2+104]
- sbb rax, 0
-
- // Add p434 anded with the mask in rax
- mov r8, [rip+asm_p434]
- mov r9, [rip+asm_p434+24]
- mov r10, [rip+asm_p434+32]
- mov rdi, [rip+asm_p434+40]
- mov rsi, [rip+asm_p434+48]
- and r8, rax
- and r9, rax
- and r10, rax
- and rdi, rax
- and rsi, rax
- mov rax, [reg_p3+56]
- add rax, r8
- adc r11, r8
- adc r12, r8
- adc r13, r9
- adc r14, r10
- adc r15, rdi
- adc rcx, rsi
-
- mov [reg_p3+56], rax
- mov [reg_p3+64], r11
- mov [reg_p3+72], r12
- mov [reg_p3+80], r13
- mov [reg_p3+88], r14
- mov [reg_p3+96], r15
- mov [reg_p3+104], rcx
- pop r15
- pop r14
- pop r13
- pop r12
- ret
-
-//***********************************************************************
-// Double 2x434-bit multiprecision subtraction
-// Operation: c [reg_p3] = c [reg_p3] - a [reg_p1] - b [reg_p2]
-//***********************************************************************
-.global mp_dblsub434x2_asm
-mp_dblsub434x2_asm:
- push r12
- push r13
-
- xor rax, rax
- mov r8, [reg_p3]
- mov r9, [reg_p3+8]
- mov r10, [reg_p3+16]
- mov r11, [reg_p3+24]
- mov r12, [reg_p3+32]
- mov r13, [reg_p3+40]
- mov rcx, [reg_p3+48]
- sub r8, [reg_p1]
- sbb r9, [reg_p1+8]
- sbb r10, [reg_p1+16]
- sbb r11, [reg_p1+24]
- sbb r12, [reg_p1+32]
- sbb r13, [reg_p1+40]
- sbb rcx, [reg_p1+48]
- adc rax, 0
- sub r8, [reg_p2]
- sbb r9, [reg_p2+8]
- sbb r10, [reg_p2+16]
- sbb r11, [reg_p2+24]
- sbb r12, [reg_p2+32]
- sbb r13, [reg_p2+40]
- sbb rcx, [reg_p2+48]
- adc rax, 0
- mov [reg_p3], r8
- mov [reg_p3+8], r9
- mov [reg_p3+16], r10
- mov [reg_p3+24], r11
- mov [reg_p3+32], r12
- mov [reg_p3+40], r13
- mov [reg_p3+48], rcx
-
- mov r8, [reg_p3+56]
- mov r9, [reg_p3+64]
- mov r10, [reg_p3+72]
- mov r11, [reg_p3+80]
- mov r12, [reg_p3+88]
- mov r13, [reg_p3+96]
- mov rcx, [reg_p3+104]
- sub r8, rax
- sbb r8, [reg_p1+56]
- sbb r9, [reg_p1+64]
- sbb r10, [reg_p1+72]
- sbb r11, [reg_p1+80]
- sbb r12, [reg_p1+88]
- sbb r13, [reg_p1+96]
- sbb rcx, [reg_p1+104]
- sub r8, [reg_p2+56]
- sbb r9, [reg_p2+64]
- sbb r10, [reg_p2+72]
- sbb r11, [reg_p2+80]
- sbb r12, [reg_p2+88]
- sbb r13, [reg_p2+96]
- sbb rcx, [reg_p2+104]
- mov [reg_p3+56], r8
- mov [reg_p3+64], r9
- mov [reg_p3+72], r10
- mov [reg_p3+80], r11
- mov [reg_p3+88], r12
- mov [reg_p3+96], r13
- mov [reg_p3+104], rcx
-
- pop r13
- pop r12
- ret
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.c
new file mode 100644
index 0000000000..7ce71ae3d3
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.c
@@ -0,0 +1,146 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: supersingular isogeny parameters and generation of functions for P434
+*********************************************************************************************/
+
+#include "sikep434r3.h"
+
+/* Encoding of field elements, elements over Z_order, elements over GF(p^2) and elliptic curve points:
+ *
+ * Elements over GF(p) and Z_order are encoded with the least significant octet (and digit) located at
+ * the leftmost position (i.e., little endian format). Elements (a+b*i) over GF(p^2), where a and b are
+ * defined over GF(p), are encoded as {a, b}, with a in the least significant position. Elliptic curve
+ * points P = (x,y) are encoded as {x, y}, with x in the least significant position. Internally, the
+ * number of digits used to represent all these elements is obtained by approximating the number of bits
+ * to the immediately greater multiple of 32. For example, a 434-bit field element is represented with
+ * Ceil(434 / 64) = 7 64-bit digits or Ceil(434 / 32) = 14 32-bit digits.
+ *
+ * Curve isogeny system "SIDHp434". Base curve: Montgomery curve By^2 = Cx^3 + Ax^2 + Cx defined over
+ * GF(p434^2), where A=6, B=1, C=1 and p434 = 2^216*3^137-1 */
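+
+/* For reference, the 434-bit figure follows from the prime shape: log2(p434) = log2(2^216 * 3^137 - 1)
+ * ~= 216 + 137*log2(3) ~= 433.1, so p434 occupies 434 bits, hence
+ *   Ceil(434 / 64) = 7  64-bit words (S2N_SIKE_P434_R3_NWORDS64_FIELD) and
+ *   Ceil(434 / 8)  = 55 bytes per GF(p434) element on the wire. */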
+
+const uint64_t p434[S2N_SIKE_P434_R3_NWORDS64_FIELD] = {
+ 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
+ 0xFDC1767AE2FFFFFF, 0x7BC65C783158AEA3, 0x6CFC5FD681C52056,
+ 0x0002341F27177344
+};
+
+const uint64_t p434x2[S2N_SIKE_P434_R3_NWORDS64_FIELD] = {
+ 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
+ 0xFB82ECF5C5FFFFFF, 0xF78CB8F062B15D47, 0xD9F8BFAD038A40AC,
+ 0x0004683E4E2EE688
+};
+
+const uint64_t p434x4[S2N_SIKE_P434_R3_NWORDS64_FIELD] = {
+ 0xFFFFFFFFFFFFFFFC, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
+ 0xF705D9EB8BFFFFFF, 0xEF1971E0C562BA8F, 0xB3F17F5A07148159,
+ 0x0008D07C9C5DCD11
+};
+
+const uint64_t p434p1[S2N_SIKE_P434_R3_NWORDS64_FIELD] = {
+ 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
+ 0xFDC1767AE3000000, 0x7BC65C783158AEA3, 0x6CFC5FD681C52056,
+ 0x0002341F27177344
+};
+
+/* Alice's generator values {XPA0 + XPA1*i, XQA0 + XQA1*i, XRA0 + XRA1*i} in GF(p434^2),
+ * expressed in Montgomery representation */
+const uint64_t A_gen[6*S2N_SIKE_P434_R3_NWORDS64_FIELD] = {
+ 0x05ADF455C5C345BF, 0x91935C5CC767AC2B, 0xAFE4E879951F0257,
+ 0x70E792DC89FA27B1, 0xF797F526BB48C8CD, 0x2181DB6131AF621F,
+ 0x00000A1C08B1ECC4, /* XPA0 */
+
+ 0x74840EB87CDA7788, 0x2971AA0ECF9F9D0B, 0xCB5732BDF41715D5,
+ 0x8CD8E51F7AACFFAA, 0xA7F424730D7E419F, 0xD671EB919A179E8C,
+ 0x0000FFA26C5A924A, /* XPA1 */
+
+ 0xFEC6E64588B7273B, 0xD2A626D74CBBF1C6, 0xF8F58F07A78098C7,
+ 0xE23941F470841B03, 0x1B63EDA2045538DD, 0x735CFEB0FFD49215,
+ 0x0001C4CB77542876, /* XQA0 */
+
+ 0xADB0F733C17FFDD6, 0x6AFFBD037DA0A050, 0x680EC43DB144E02F,
+ 0x1E2E5D5FF524E374, 0xE2DDA115260E2995, 0xA6E4B552E2EDE508,
+ 0x00018ECCDDF4B53E, /* XQA1 */
+
+ 0x01BA4DB518CD6C7D, 0x2CB0251FE3CC0611, 0x259B0C6949A9121B,
+ 0x60E17AC16D2F82AD, 0x3AA41F1CE175D92D, 0x413FBE6A9B9BC4F3,
+ 0x00022A81D8D55643, /* XRA0 */
+
+ 0xB8ADBC70FC82E54A, 0xEF9CDDB0D5FADDED, 0x5820C734C80096A0,
+ 0x7799994BAA96E0E4, 0x044961599E379AF8, 0xDB2B94FBF09F27E2,
+ 0x0000B87FC716C0C6 /* XRA1 */
+};
+
+/* Bob's generator values {XPB0, XQB0, XRB0 + XRB1*i} in GF(p434^2), expressed in Montgomery representation */
+const uint64_t B_gen[6*S2N_SIKE_P434_R3_NWORDS64_FIELD] = {
+ 0x6E5497556EDD48A3, 0x2A61B501546F1C05, 0xEB919446D049887D,
+ 0x5864A4A69D450C4F, 0xB883F276A6490D2B, 0x22CC287022D5F5B9,
+ 0x0001BED4772E551F, /* XPB0 */
+
+ 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
+ 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
+ 0x0000000000000000, /* XPB1 */
+
+ 0xFAE2A3F93D8B6B8E, 0x494871F51700FE1C, 0xEF1A94228413C27C,
+ 0x498FF4A4AF60BD62, 0xB00AD2A708267E8A, 0xF4328294E017837F,
+ 0x000034080181D8AE, /* XQB0 */
+
+ 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
+ 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
+ 0x0000000000000000, /* XQB1 */
+
+ 0x283B34FAFEFDC8E4, 0x9208F44977C3E647, 0x7DEAE962816F4E9A,
+ 0x68A2BA8AA262EC9D, 0x8176F112EA43F45B, 0x02106D022634F504,
+ 0x00007E8A50F02E37, /* XRB0 */
+
+ 0xB378B7C1DA22CCB1, 0x6D089C99AD1D9230, 0xEBE15711813E2369,
+ 0x2B35A68239D48A53, 0x445F6FD138407C93, 0xBEF93B29A3F6B54B,
+ 0x000173FA910377D3 /* XRB1 */
+};
+
+/* Montgomery constant Montgomery_R2 = (2^448)^2 mod p434 */
+const uint64_t Montgomery_R2[S2N_SIKE_P434_R3_NWORDS64_FIELD] = {
+ 0x28E55B65DCD69B30, 0xACEC7367768798C2, 0xAB27973F8311688D,
+ 0x175CC6AF8D6C7C0B, 0xABCD92BF2DDE347E, 0x69E16A61C7686D9A,
+ 0x000025A89BCDD12A
+};
+
+/* Value one in Montgomery representation */
+const uint64_t Montgomery_one[S2N_SIKE_P434_R3_NWORDS64_FIELD] = {
+ 0x000000000000742C, 0x0000000000000000, 0x0000000000000000,
+ 0xB90FF404FC000000, 0xD801A4FB559FACD4, 0xE93254545F77410C,
+ 0x0000ECEEA7BD2EDA
+};
+
+/* Fixed parameters for isogeny tree computation */
+const unsigned int strat_Alice[S2N_SIKE_P434_R3_MAX_ALICE-1] = {
+ 48, 28, 16, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 13, 7, 4,
+ 2, 1, 1, 2, 1, 1, 3, 2, 1, 1, 1, 1, 5, 4, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 21, 12, 7, 4, 2, 1, 1, 2, 1, 1, 3, 2,
+ 1, 1, 1, 1, 5, 3, 2, 1, 1, 1, 1, 2, 1, 1, 1, 9, 5, 3, 2, 1, 1, 1, 1, 2, 1, 1, 1, 4, 2, 1, 1, 1, 2, 1, 1
+};
+
+const unsigned int strat_Bob[S2N_SIKE_P434_R3_MAX_BOB-1] = {
+ 66, 33, 17, 9, 5, 3, 2, 1, 1, 1, 1, 2, 1, 1, 1, 4, 2, 1, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 1, 2, 1, 1, 4, 2, 1, 1,
+ 2, 1, 1, 16, 8, 4, 2, 1, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 32,
+ 16, 8, 4, 3, 1, 1, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 16, 8, 4,
+ 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1, 8, 4, 2, 1, 1, 2, 1, 1, 4, 2, 1, 1, 2, 1, 1
+};
+
+/* Returns true if the machine is big endian */
+bool is_big_endian(void)
+{
+ uint16_t i = 1;
+ uint8_t *ptr = (uint8_t *)&i;
+ return !(*ptr);
+}
+
+uint32_t bswap32(uint32_t x)
+{
+ uint32_t i = (x >> 16) | (x << 16);
+ return ((i & UINT32_C(0xff00ff00)) >> 8) | ((i & UINT32_C(0x00ff00ff)) << 8);
+}
+
+uint64_t bswap64(uint64_t x)
+{
+ return bswap32(x >> 32) | (((uint64_t)bswap32(x)) << 32);
+}
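+
+/* Illustrative note: these helpers are presumably used, via the S2N_SIKE_P434_R3_BSWAP_DIGIT
+ * macro in sikep434r3.h, to convert digits to the little-endian wire order on big-endian
+ * machines. A worked example of the swap above:
+ *   bswap32(0x11223344): rotate halves -> 0x33441122, swap bytes within each half -> 0x44332211 */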
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.h
new file mode 100644
index 0000000000..5b797b1d7f
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3.h
@@ -0,0 +1,181 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: supersingular isogeny parameters, generation of functions for P434;
+* configuration and platform-dependent macros
+*********************************************************************************************/
+
+#pragma once
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/* All sikep434r3 functions and global variables in the pq-crypto/sike_r3 directory
+ * should be defined using this namespace macro to avoid symbol collisions. For example,
+ * in foo.h, declare a function as follows:
+ *
+ * #define foo_function S2N_SIKE_P434_R3_NAMESPACE(foo_function)
+ * int foo_function(int foo_argument); */
+#define S2N_SIKE_P434_R3_NAMESPACE(s) s2n_sike_p434_r3_##s
+
+/* Endian-related functionality */
+/* Returns true if the machine is big endian */
+#define is_big_endian S2N_SIKE_P434_R3_NAMESPACE(is_big_endian)
+bool is_big_endian(void);
+
+#define bswap32 S2N_SIKE_P434_R3_NAMESPACE(bswap32)
+uint32_t bswap32(uint32_t x);
+
+#define bswap64 S2N_SIKE_P434_R3_NAMESPACE(bswap64)
+uint64_t bswap64(uint64_t x);
+
+/* Arch specific definitions */
+#define digit_t S2N_SIKE_P434_R3_NAMESPACE(digit_t)
+#define hdigit_t S2N_SIKE_P434_R3_NAMESPACE(hdigit_t)
+#if defined(_AMD64_) || defined(__x86_64) || defined(__x86_64__) || defined(__aarch64__) || defined(_S390X_) || defined(_ARM64_) || defined(__powerpc64__) || (defined(__riscv) && (__riscv_xlen == 64))
+ #define S2N_SIKE_P434_R3_NWORDS_FIELD 7 /* Number of words of a 434-bit field element */
+ #define S2N_SIKE_P434_R3_ZERO_WORDS 3 /* Number of "0" digits in the least significant part of p434 + 1 */
+ #define S2N_SIKE_P434_R3_RADIX 64
+ #define S2N_SIKE_P434_R3_LOG2RADIX 6
+ #define S2N_SIKE_P434_R3_BSWAP_DIGIT(i) bswap64((i))
+ typedef uint64_t digit_t;
+ typedef uint32_t hdigit_t;
+#elif defined(_X86_) || defined(_ARM_) || defined(__arm__) || defined(__i386__)
+ #define S2N_SIKE_P434_R3_NWORDS_FIELD 14 /* Number of words of a 434-bit field element */
+ #define S2N_SIKE_P434_R3_ZERO_WORDS 6 /* Number of "0" digits in the least significant part of p434 + 1 */
+ #define S2N_SIKE_P434_R3_RADIX 32
+ #define S2N_SIKE_P434_R3_LOG2RADIX 5
+ #define S2N_SIKE_P434_R3_BSWAP_DIGIT(i) bswap32((i))
+ typedef uint32_t digit_t;
+ typedef uint16_t hdigit_t;
+#else
+ #error -- "Unsupported ARCHITECTURE"
+#endif
+
+/* Basic constants */
+#define S2N_SIKE_P434_R3_NBITS_FIELD 434
+#define S2N_SIKE_P434_R3_MAXBITS_FIELD 448
+/* Number of 64-bit words of a 434-bit field element */
+#define S2N_SIKE_P434_R3_NWORDS64_FIELD ((S2N_SIKE_P434_R3_NBITS_FIELD+63)/64)
+#define S2N_SIKE_P434_R3_NBITS_ORDER 256
+/* Number of words of oA and oB, where oA and oB are the subgroup orders of Alice and Bob, resp. */
+#define S2N_SIKE_P434_R3_NWORDS_ORDER ((S2N_SIKE_P434_R3_NBITS_ORDER+S2N_SIKE_P434_R3_RADIX-1)/S2N_SIKE_P434_R3_RADIX)
+#define S2N_SIKE_P434_R3_ALICE 0
+#define S2N_SIKE_P434_R3_BOB 1
+#define S2N_SIKE_P434_R3_OALICE_BITS 216
+#define S2N_SIKE_P434_R3_OBOB_BITS 218
+#define S2N_SIKE_P434_R3_MASK_ALICE 0xFF
+#define S2N_SIKE_P434_R3_MASK_BOB 0x01
+
+/* Fixed parameters for isogeny tree computation */
+#define S2N_SIKE_P434_R3_MAX_INT_POINTS_ALICE 7
+#define S2N_SIKE_P434_R3_MAX_INT_POINTS_BOB 8
+#define S2N_SIKE_P434_R3_MAX_ALICE 108
+#define S2N_SIKE_P434_R3_MAX_BOB 137
+#define S2N_SIKE_P434_R3_MSG_BYTES 16
+#define S2N_SIKE_P434_R3_SECRETKEY_A_BYTES ((S2N_SIKE_P434_R3_OALICE_BITS + 7) / 8)
+#define S2N_SIKE_P434_R3_SECRETKEY_B_BYTES ((S2N_SIKE_P434_R3_OBOB_BITS - 1 + 7) / 8)
+#define S2N_SIKE_P434_R3_FP2_ENCODED_BYTES (2 * ((S2N_SIKE_P434_R3_NBITS_FIELD + 7) / 8))
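+/* With the constants above these evaluate to the byte sizes used throughout this directory:
+ *   S2N_SIKE_P434_R3_SECRETKEY_A_BYTES = (216 + 7) / 8       = 27
+ *   S2N_SIKE_P434_R3_SECRETKEY_B_BYTES = (218 - 1 + 7) / 8   = 28
+ *   S2N_SIKE_P434_R3_FP2_ENCODED_BYTES = 2 * ((434 + 7) / 8) = 110
+ * which match the 27/28-byte private keys and 110-byte GF(p434^2) elements described in
+ * sikep434r3_api.h. */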
+
+/* SIDH's basic element definitions and point representations */
+/* Datatype for representing 434-bit field elements (448-bit max.) */
+#define felm_t S2N_SIKE_P434_R3_NAMESPACE(felm_t)
+typedef digit_t felm_t[S2N_SIKE_P434_R3_NWORDS_FIELD];
+
+/* Datatype for representing double-precision 2x434-bit field elements (2x448-bit max.) */
+#define dfelm_t S2N_SIKE_P434_R3_NAMESPACE(dfelm_t)
+typedef digit_t dfelm_t[2*S2N_SIKE_P434_R3_NWORDS_FIELD];
+
+/* Datatype for representing quadratic extension field elements GF(p434^2) */
+#define f2elm_t S2N_SIKE_P434_R3_NAMESPACE(f2elm_t)
+#define felm_s S2N_SIKE_P434_R3_NAMESPACE(felm_s)
+typedef struct felm_s {
+ felm_t e[2];
+} f2elm_t;
+
+/* Point representation in projective XZ Montgomery coordinates. */
+#define point_proj S2N_SIKE_P434_R3_NAMESPACE(point_proj)
+typedef struct { f2elm_t X; f2elm_t Z; } point_proj;
+#define point_proj_t S2N_SIKE_P434_R3_NAMESPACE(point_proj_t)
+typedef point_proj point_proj_t[1];
+
+/* Macro to avoid compiler warnings when detecting unreferenced parameters */
+#define S2N_SIKE_P434_R3_UNREFERENCED_PARAMETER(PAR) ((void)(PAR))
+
+/********************** Constant-time unsigned comparisons ***********************/
+/* The following functions return 1 (TRUE) if condition is true, 0 (FALSE) otherwise */
+
+/* Is x != 0? */
+static __inline unsigned int is_digit_nonzero_ct(const digit_t x)
+{
+ return (unsigned int)((x | (0-x)) >> (S2N_SIKE_P434_R3_RADIX-1));
+}
+
+/* Is x = 0? */
+static __inline unsigned int is_digit_zero_ct(const digit_t x)
+{
+ return (unsigned int)(1 ^ is_digit_nonzero_ct(x));
+}
+
+/* Is x < y? */
+static __inline unsigned int is_digit_lessthan_ct(const digit_t x, const digit_t y)
+{
+ return (unsigned int)((x ^ ((x ^ y) | ((x - y) ^ y))) >> (S2N_SIKE_P434_R3_RADIX-1));
+}
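+
+/* Worked example of the branch-free comparison: with 64-bit digits and x = 3, y = 5,
+ *   x ^ y                   = 0x...0006
+ *   (x - y) ^ y             = 0x...FFFB   (x - y wraps around to 2^64 - 2)
+ *   x ^ ((x^y) | ((x-y)^y)) = 0xFFFF...FFFC, whose top bit is 1, so the function returns 1.
+ * The expression extracts the borrow (sign) bit of x - y without branching on secret data. */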
+
+/* Definitions for generic C implementation */
+
+typedef uint64_t uint128_t[2];
+
+/* Digit multiplication */
+#define S2N_SIKE_P434_R3_MUL(multiplier, multiplicand, hi, lo) \
+ digit_x_digit((multiplier), (multiplicand), &(lo));
+
+/* Digit addition with carry */
+#define S2N_SIKE_P434_R3_ADDC(carryIn, addend1, addend2, carryOut, sumOut) \
+ { digit_t tempReg = (addend1) + (digit_t)(carryIn); \
+ (sumOut) = (addend2) + tempReg; \
+ (carryOut) = (is_digit_lessthan_ct(tempReg, (digit_t)(carryIn)) | is_digit_lessthan_ct((sumOut), tempReg)); }
+
+/* Digit subtraction with borrow */
+#define S2N_SIKE_P434_R3_SUBC(borrowIn, minuend, subtrahend, borrowOut, differenceOut) \
+ { digit_t tempReg = (minuend) - (subtrahend); \
+ unsigned int borrowReg = (is_digit_lessthan_ct((minuend), (subtrahend)) | ((borrowIn) & is_digit_zero_ct(tempReg))); \
+ (differenceOut) = tempReg - (digit_t)(borrowIn); \
+ (borrowOut) = borrowReg; }
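+
+/* Sketch only (not part of the library API): a multiprecision addition over a felm_t would
+ * presumably chain S2N_SIKE_P434_R3_ADDC word by word, propagating the carry:
+ *
+ *   unsigned int carry = 0;
+ *   for (int i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ *       S2N_SIKE_P434_R3_ADDC(carry, a[i], b[i], carry, c[i]);
+ *   }
+ *
+ * where a, b and c are hypothetical felm_t operands used here for illustration only. */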
+
+/* Shift right with flexible datatype */
+#define S2N_SIKE_P434_R3_SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \
+ (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << ((DigitSize) - (shift)));
+
+/* Fixed parameters for computation */
+#define p434 S2N_SIKE_P434_R3_NAMESPACE(p434)
+extern const uint64_t p434[S2N_SIKE_P434_R3_NWORDS64_FIELD];
+
+#define p434x2 S2N_SIKE_P434_R3_NAMESPACE(p434x2)
+extern const uint64_t p434x2[S2N_SIKE_P434_R3_NWORDS64_FIELD];
+
+#define p434x4 S2N_SIKE_P434_R3_NAMESPACE(p434x4)
+extern const uint64_t p434x4[S2N_SIKE_P434_R3_NWORDS64_FIELD];
+
+#define p434p1 S2N_SIKE_P434_R3_NAMESPACE(p434p1)
+extern const uint64_t p434p1[S2N_SIKE_P434_R3_NWORDS64_FIELD];
+
+#define A_gen S2N_SIKE_P434_R3_NAMESPACE(A_gen)
+extern const uint64_t A_gen[6*S2N_SIKE_P434_R3_NWORDS64_FIELD];
+
+#define B_gen S2N_SIKE_P434_R3_NAMESPACE(B_gen)
+extern const uint64_t B_gen[6*S2N_SIKE_P434_R3_NWORDS64_FIELD];
+
+#define Montgomery_R2 S2N_SIKE_P434_R3_NAMESPACE(Montgomery_R2)
+extern const uint64_t Montgomery_R2[S2N_SIKE_P434_R3_NWORDS64_FIELD];
+
+#define Montgomery_one S2N_SIKE_P434_R3_NAMESPACE(Montgomery_one)
+extern const uint64_t Montgomery_one[S2N_SIKE_P434_R3_NWORDS64_FIELD];
+
+#define strat_Alice S2N_SIKE_P434_R3_NAMESPACE(strat_Alice)
+extern const unsigned int strat_Alice[S2N_SIKE_P434_R3_MAX_ALICE-1];
+
+#define strat_Bob S2N_SIKE_P434_R3_NAMESPACE(strat_Bob)
+extern const unsigned int strat_Bob[S2N_SIKE_P434_R3_MAX_BOB-1];
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_api.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_api.h
new file mode 100644
index 0000000000..cf3c4feb85
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_api.h
@@ -0,0 +1,78 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: API header file for P434
+*********************************************************************************************/
+
+#pragma once
+
+#include "sikep434r3.h"
+
+/*********************** Key encapsulation mechanism API ***********************/
+/* Encoding of keys for KEM-based isogeny system "SIKEp434" (wire format):
+ *
+ * Elements over GF(p434) are encoded in 55 octets in little endian format (i.e., the least
+ * significant octet is located in the lowest memory address). Elements (a+b*i) over GF(p434^2),
+ * where a and b are defined over GF(p434), are encoded as {a, b}, with a in the lowest memory portion.
+ *
+ * Private keys sk consist of the concatenation of a 16-byte random value, a value in the range
+ * [0, 2^217-1] and the public key pk. In the SIKE API, private keys are encoded in 374 octets in
+ * little endian format. Public keys pk consist of 3 elements in GF(p434^2). In the SIKE API, pk
+ * is encoded in 330 octets. Ciphertexts ct consist of the concatenation of a public key value
+ * and a 16-byte value. In the SIKE API, ct is encoded in 330 + 16 = 346 octets. Shared keys ss
+ * consist of a value of 16 octets. */
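+
+/* These sizes are consistent with the P434 parameters in sikep434r3.h:
+ *   pk = 3 * 110 (three GF(p434^2) elements)       = 330 octets
+ *   sk = 16 + 28 + 330 (random value, key, pk)     = 374 octets
+ *   ct = 330 + 16                                  = 346 octets, ss = 16 octets. */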
+
+/*********************** Ephemeral key exchange API ***********************/
+
+/* SECURITY NOTE: SIDH supports ephemeral Diffie-Hellman key exchange. It is NOT secure to use
+ * it with static keys. See "On the Security of Supersingular Isogeny Cryptosystems", S.D. Galbraith,
+ * C. Petit, B. Shani and Y.B. Ti, in ASIACRYPT 2016, 2016. Extended version available at:
+ * http://eprint.iacr.org/2016/859 */
+
+/* Generation of Bob's secret key
+ * Outputs random value in [0, 2^Floor(Log(2,3^137)) - 1] to be used as Bob's private key */
+#define random_mod_order_B S2N_SIKE_P434_R3_NAMESPACE(random_mod_order_B)
+int random_mod_order_B(unsigned char* random_digits);
+
+/* Alice's ephemeral public key generation
+ * Input: a private key PrivateKeyA in the range [0, 2^216 - 1], stored in 27 bytes.
+ * Output: the public key PublicKeyA consisting of 3 GF(p434^2) elements encoded in 330 bytes. */
+#define EphemeralKeyGeneration_A S2N_SIKE_P434_R3_NAMESPACE(EphemeralKeyGeneration_A)
+int EphemeralKeyGeneration_A(const unsigned char* PrivateKeyA, unsigned char* PublicKeyA);
+
+/* Bob's ephemeral key-pair generation
+ * It produces a private key PrivateKeyB and computes the public key PublicKeyB.
+ * The private key is an integer in the range [0, 2^Floor(Log(2,3^137)) - 1], stored in 28 bytes.
+ * The public key consists of 3 GF(p434^2) elements encoded in 330 bytes. */
+#define EphemeralKeyGeneration_B S2N_SIKE_P434_R3_NAMESPACE(EphemeralKeyGeneration_B)
+int EphemeralKeyGeneration_B(const unsigned char* PrivateKeyB, unsigned char* PublicKeyB);
+
+/* Alice's ephemeral shared secret computation
+ * It produces a shared secret key SharedSecretA using her secret key PrivateKeyA and Bob's public key PublicKeyB
+ * Inputs: Alice's PrivateKeyA is an integer in the range [0, 2^216 - 1], stored in 27 bytes.
+ * Bob's PublicKeyB consists of 3 GF(p434^2) elements encoded in 330 bytes.
+ * Output: a shared secret SharedSecretA that consists of one element in GF(p434^2) encoded in 110 bytes. */
+#define EphemeralSecretAgreement_A S2N_SIKE_P434_R3_NAMESPACE(EphemeralSecretAgreement_A)
+int EphemeralSecretAgreement_A(const unsigned char* PrivateKeyA, const unsigned char* PublicKeyB, unsigned char* SharedSecretA);
+
+/* Bob's ephemeral shared secret computation
+ * It produces a shared secret key SharedSecretB using his secret key PrivateKeyB and Alice's public key PublicKeyA
+ * Inputs: Bob's PrivateKeyB is an integer in the range [0, 2^Floor(Log(2,3^137)) - 1], stored in 28 bytes.
+ * Alice's PublicKeyA consists of 3 GF(p434^2) elements encoded in 330 bytes.
+ * Output: a shared secret SharedSecretB that consists of one element in GF(p434^2) encoded in 110 bytes. */
+#define EphemeralSecretAgreement_B S2N_SIKE_P434_R3_NAMESPACE(EphemeralSecretAgreement_B)
+int EphemeralSecretAgreement_B(const unsigned char* PrivateKeyB, const unsigned char* PublicKeyA, unsigned char* SharedSecretB);
+
+/* Encoding of keys for KEX-based isogeny system "SIDHp434" (wire format):
+ *
+ * Elements over GF(p434) are encoded in 55 octets in little endian format (i.e., the
+ * least significant octet is located in the lowest memory address). Elements (a+b*i)
+ * over GF(p434^2), where a and b are defined over GF(p434), are encoded as {a, b}, with
+ * a in the lowest memory portion.
+ *
+ * Private keys PrivateKeyA and PrivateKeyB can have values in the range [0, 2^216-1] and
+ * [0, 2^Floor(Log(2,3^137)) - 1], resp. In the SIDH API, Alice's and Bob's private keys
+ * are encoded in 27 and 28 octets, resp., in little endian format. Public keys PublicKeyA
+ * and PublicKeyB consist of 3 elements in GF(p434^2). In the SIDH API, they are encoded in
+ * 330 octets. Shared keys SharedSecretA and SharedSecretB consist of one element in GF(p434^2).
+ * In the SIDH API, they are encoded in 110 octets. */
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.c
new file mode 100644
index 0000000000..e5ae4e7c7e
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.c
@@ -0,0 +1,348 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: elliptic curve and isogeny functions
+*********************************************************************************************/
+
+#include "sikep434r3.h"
+#include "sikep434r3_fpx.h"
+#include "sikep434r3_ec_isogeny.h"
+
+/* Doubling of a Montgomery point in projective coordinates (X:Z).
+ * Input: projective Montgomery x-coordinates P = (X1:Z1), where x1=X1/Z1 and Montgomery curve constants A+2C and 4C.
+ * Output: projective Montgomery x-coordinates Q = 2*P = (X2:Z2). */
+void xDBL(const point_proj_t P, point_proj_t Q, const f2elm_t *A24plus, const f2elm_t *C24)
+{
+ f2elm_t _t0, _t1;
+ f2elm_t *t0=&_t0, *t1=&_t1;
+
+ mp2_sub_p2(&P->X, &P->Z, t0); /* t0 = X1-Z1 */
+ mp2_add(&P->X, &P->Z, t1); /* t1 = X1+Z1 */
+ fp2sqr_mont(t0, t0); /* t0 = (X1-Z1)^2 */
+ fp2sqr_mont(t1, t1); /* t1 = (X1+Z1)^2 */
+ fp2mul_mont(C24, t0, &Q->Z); /* Z2 = C24*(X1-Z1)^2 */
+ fp2mul_mont(t1, &Q->Z, &Q->X); /* X2 = C24*(X1-Z1)^2*(X1+Z1)^2 */
+ mp2_sub_p2(t1, t0, t1); /* t1 = (X1+Z1)^2-(X1-Z1)^2 */
+ fp2mul_mont(A24plus, t1, t0); /* t0 = A24plus*[(X1+Z1)^2-(X1-Z1)^2] */
+ mp2_add(&Q->Z, t0, &Q->Z); /* Z2 = A24plus*[(X1+Z1)^2-(X1-Z1)^2] + C24*(X1-Z1)^2 */
+ fp2mul_mont(&Q->Z, t1, &Q->Z); /* Z2 = [A24plus*[(X1+Z1)^2-(X1-Z1)^2] + C24*(X1-Z1)^2]*[(X1+Z1)^2-(X1-Z1)^2] */
+}
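+
+/* Counting the calls above, one doubling costs 4 GF(p^2) multiplications, 2 GF(p^2) squarings
+ * and 4 GF(p^2) additions/subtractions. */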
+
+/* Computes [2^e](X:Z) on Montgomery curve with projective constant via e repeated doublings.
+ * Input: projective Montgomery x-coordinates P = (XP:ZP), such that xP=XP/ZP and Montgomery curve constants A+2C and 4C.
+ * Output: projective Montgomery x-coordinates Q <- (2^e)*P. */
+void xDBLe(const point_proj_t P, point_proj_t Q, const f2elm_t *A24plus, const f2elm_t *C24, const int e)
+{
+ int i;
+
+ copy_words((const digit_t*)P, (digit_t*)Q, 2*2*S2N_SIKE_P434_R3_NWORDS_FIELD);
+
+ for (i = 0; i < e; i++) {
+ xDBL(Q, Q, A24plus, C24);
+ }
+}
+
+/* Computes the corresponding 4-isogeny of a projective Montgomery point (X4:Z4) of order 4.
+ * Input: projective point of order four P = (X4:Z4).
+ * Output: the 4-isogenous Montgomery curve with projective coefficients A+2C/4C and the 3 coefficients
+ * that are used to evaluate the isogeny at a point in eval_4_isog(). */
+void get_4_isog(const point_proj_t P, f2elm_t *A24plus, f2elm_t *C24, f2elm_t *coeff)
+{
+ mp2_sub_p2(&P->X, &P->Z, &coeff[1]); /* coeff[1] = X4-Z4 */
+ mp2_add(&P->X, &P->Z, &coeff[2]); /* coeff[2] = X4+Z4 */
+ fp2sqr_mont(&P->Z, &coeff[0]); /* coeff[0] = Z4^2 */
+ mp2_add(&coeff[0], &coeff[0], &coeff[0]); /* coeff[0] = 2*Z4^2 */
+ fp2sqr_mont(&coeff[0], C24); /* C24 = 4*Z4^4 */
+ mp2_add(&coeff[0], &coeff[0], &coeff[0]); /* coeff[0] = 4*Z4^2 */
+ fp2sqr_mont(&P->X, A24plus); /* A24plus = X4^2 */
+ mp2_add(A24plus, A24plus, A24plus); /* A24plus = 2*X4^2 */
+ fp2sqr_mont(A24plus, A24plus); /* A24plus = 4*X4^4 */
+}
+
+/* Evaluates the isogeny at the point (X:Z) in the domain of the isogeny, given a 4-isogeny phi defined
+ * by the 3 coefficients in coeff (computed in the function get_4_isog()).
+ * Inputs: the coefficients defining the isogeny, and the projective point P = (X:Z).
+ * Output: the projective point P = phi(P) = (X:Z) in the codomain. */
+void eval_4_isog(point_proj_t P, f2elm_t *coeff)
+{
+ f2elm_t _t0, _t1;
+ f2elm_t *t0=&_t0, *t1=&_t1;
+
+ mp2_add(&P->X, &P->Z, t0); /* t0 = X+Z */
+ mp2_sub_p2(&P->X, &P->Z, t1); /* t1 = X-Z */
+ fp2mul_mont(t0, &coeff[1], &P->X); /* X = (X+Z)*coeff[1] */
+ fp2mul_mont(t1, &coeff[2], &P->Z); /* Z = (X-Z)*coeff[2] */
+ fp2mul_mont(t0, t1, t0); /* t0 = (X+Z)*(X-Z) */
+ fp2mul_mont(&coeff[0], t0, t0); /* t0 = coeff[0]*(X+Z)*(X-Z) */
+ mp2_add(&P->X, &P->Z, t1); /* t1 = (X-Z)*coeff[2] + (X+Z)*coeff[1] */
+ mp2_sub_p2(&P->X, &P->Z, &P->Z); /* Z = (X-Z)*coeff[2] - (X+Z)*coeff[1] */
+ fp2sqr_mont(t1, t1); /* t1 = [(X-Z)*coeff[2] + (X+Z)*coeff[1]]^2 */
+ fp2sqr_mont(&P->Z, &P->Z); /* Z = [(X-Z)*coeff[2] - (X+Z)*coeff[1]]^2 */
+ mp2_add(t1, t0, &P->X); /* X = coeff[0]*(X+Z)*(X-Z) + [(X-Z)*coeff[2] + (X+Z)*coeff[1]]^2 */
+ mp2_sub_p2(&P->Z, t0, t0); /* t0 = [(X-Z)*coeff[2] - (X+Z)*coeff[1]]^2 - coeff[0]*(X+Z)*(X-Z) */
+ fp2mul_mont(&P->X, t1, &P->X); /* Xfinal */
+ fp2mul_mont(&P->Z, t0, &P->Z); /* Zfinal */
+}
+
+/* Tripling of a Montgomery point in projective coordinates (X:Z).
+ * Input: projective Montgomery x-coordinates P = (X:Z), where x=X/Z and Montgomery curve constants A24plus = A+2C and A24minus = A-2C.
+ * Output: projective Montgomery x-coordinates Q = 3*P = (X3:Z3). */
+void xTPL(const point_proj_t P, point_proj_t Q, const f2elm_t *A24minus, const f2elm_t *A24plus)
+{
+ f2elm_t _t0, _t1, _t2, _t3, _t4, _t5, _t6;
+ f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2, *t3=&_t3, *t4=&_t4, *t5=&_t5, *t6=&_t6;
+
+ mp2_sub_p2(&P->X, &P->Z, t0); /* t0 = X-Z */
+ fp2sqr_mont(t0, t2); /* t2 = (X-Z)^2 */
+ mp2_add(&P->X, &P->Z, t1); /* t1 = X+Z */
+ fp2sqr_mont(t1, t3); /* t3 = (X+Z)^2 */
+ mp2_add(&P->X, &P->X, t4); /* t4 = 2*X */
+ mp2_add(&P->Z, &P->Z, t0); /* t0 = 2*Z */
+ fp2sqr_mont(t4, t1); /* t1 = 4*X^2 */
+ mp2_sub_p2(t1, t3, t1); /* t1 = 4*X^2 - (X+Z)^2 */
+ mp2_sub_p2(t1, t2, t1); /* t1 = 4*X^2 - (X+Z)^2 - (X-Z)^2 */
+ fp2mul_mont(A24plus, t3, t5); /* t5 = A24plus*(X+Z)^2 */
+ fp2mul_mont(t3, t5, t3); /* t3 = A24plus*(X+Z)^4 */
+ fp2mul_mont(A24minus, t2, t6); /* t6 = A24minus*(X-Z)^2 */
+ fp2mul_mont(t2, t6, t2); /* t2 = A24minus*(X-Z)^4 */
+ mp2_sub_p2(t2, t3, t3); /* t3 = A24minus*(X-Z)^4 - A24plus*(X+Z)^4 */
+ mp2_sub_p2(t5, t6, t2); /* t2 = A24plus*(X+Z)^2 - A24minus*(X-Z)^2 */
+ fp2mul_mont(t1, t2, t1); /* t1 = [4*X^2 - (X+Z)^2 - (X-Z)^2]*[A24plus*(X+Z)^2 - A24minus*(X-Z)^2] */
+ fp2add(t3, t1, t2); /* t2 = [4*X^2 - (X+Z)^2 - (X-Z)^2]*[A24plus*(X+Z)^2 - A24minus*(X-Z)^2] + A24minus*(X-Z)^4 - A24plus*(X+Z)^4 */
+ fp2sqr_mont(t2, t2); /* t2 = t2^2 */
+ fp2mul_mont(t4, t2, &Q->X); /* X3 = 2*X*t2 */
+ fp2sub(t3, t1, t1); /* t1 = A24minus*(X-Z)^4 - A24plus*(X+Z)^4 - [4*X^2 - (X+Z)^2 - (X-Z)^2]*[A24plus*(X+Z)^2 - A24minus*(X-Z)^2] */
+ fp2sqr_mont(t1, t1); /* t1 = t1^2 */
+ fp2mul_mont(t0, t1, &Q->Z); /* Z3 = 2*Z*t1 */
+}
+
+/* Computes [3^e](X:Z) on Montgomery curve with projective constant via e repeated triplings.
+ * Input: projective Montgomery x-coordinates P = (XP:ZP), such that xP=XP/ZP and Montgomery curve constants A24plus = A+2C and A24minus = A-2C.
+ * Output: projective Montgomery x-coordinates Q <- (3^e)*P. */
+void xTPLe(const point_proj_t P, point_proj_t Q, const f2elm_t *A24minus, const f2elm_t *A24plus, const int e)
+{
+ int i;
+
+ copy_words((const digit_t*)P, (digit_t*)Q, 2*2*S2N_SIKE_P434_R3_NWORDS_FIELD);
+
+ for (i = 0; i < e; i++) {
+ xTPL(Q, Q, A24minus, A24plus);
+ }
+}
+
+/* Computes the corresponding 3-isogeny of a projective Montgomery point (X3:Z3) of order 3.
+ * Input: projective point of order three P = (X3:Z3).
+ * Output: the 3-isogenous Montgomery curve with projective coefficient A/C. */
+void get_3_isog(const point_proj_t P, f2elm_t *A24minus, f2elm_t *A24plus, f2elm_t *coeff)
+{
+ f2elm_t _t0, _t1, _t2, _t3, _t4;
+ f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2, *t3=&_t3, *t4=&_t4;
+
+ mp2_sub_p2(&P->X, &P->Z, &coeff[0]); /* coeff0 = X-Z */
+ fp2sqr_mont(&coeff[0], t0); /* t0 = (X-Z)^2 */
+ mp2_add(&P->X, &P->Z, &coeff[1]); /* coeff1 = X+Z */
+ fp2sqr_mont(&coeff[1], t1); /* t1 = (X+Z)^2 */
+ mp2_add(&P->X, &P->X, t3); /* t3 = 2*X */
+ fp2sqr_mont(t3, t3); /* t3 = 4*X^2 */
+ fp2sub(t3, t0, t2); /* t2 = 4*X^2 - (X-Z)^2 */
+ fp2sub(t3, t1, t3); /* t3 = 4*X^2 - (X+Z)^2 */
+ mp2_add(t0, t3, t4); /* t4 = 4*X^2 - (X+Z)^2 + (X-Z)^2 */
+ mp2_add(t4, t4, t4); /* t4 = 2(4*X^2 - (X+Z)^2 + (X-Z)^2) */
+ mp2_add(t1, t4, t4); /* t4 = 8*X^2 - (X+Z)^2 + 2*(X-Z)^2 */
+ fp2mul_mont(t2, t4, A24minus); /* A24minus = [4*X^2 - (X-Z)^2]*[8*X^2 - (X+Z)^2 + 2*(X-Z)^2] */
+ mp2_add(t1, t2, t4); /* t4 = 4*X^2 + (X+Z)^2 - (X-Z)^2 */
+ mp2_add(t4, t4, t4); /* t4 = 2(4*X^2 + (X+Z)^2 - (X-Z)^2) */
+ mp2_add(t0, t4, t4); /* t4 = 8*X^2 + 2*(X+Z)^2 - (X-Z)^2 */
+ fp2mul_mont(t3, t4, A24plus); /* A24plus = [4*X^2 - (X+Z)^2]*[8*X^2 + 2*(X+Z)^2 - (X-Z)^2] */
+}
+
+/* Evaluates the 3-isogeny phi at the point (X:Z) in its domain, given the 2 coefficients
+ * in coeff that define the isogeny (computed in the function get_3_isog()).
+ * Inputs: the projective point Q = (X:Z) and the isogeny coefficients coeff.
+ * Output: the projective point Q <- phi(Q) = (X3:Z3) in the codomain. */
+void eval_3_isog(point_proj_t Q, const f2elm_t *coeff)
+{
+ f2elm_t _t0, _t1, _t2;
+ f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2;
+
+ mp2_add(&Q->X, &Q->Z, t0); /* t0 = X+Z */
+ mp2_sub_p2(&Q->X, &Q->Z, t1); /* t1 = X-Z */
+ fp2mul_mont(&coeff[0], t0, t0); /* t0 = coeff0*(X+Z) */
+ fp2mul_mont(&coeff[1], t1, t1); /* t1 = coeff1*(X-Z) */
+ mp2_add(t0, t1, t2); /* t2 = coeff0*(X+Z) + coeff1*(X-Z) */
+ mp2_sub_p2(t1, t0, t0); /* t0 = coeff1*(X-Z) - coeff0*(X+Z) */
+ fp2sqr_mont(t2, t2); /* t2 = [coeff0*(X+Z) + coeff1*(X-Z)]^2 */
+ fp2sqr_mont(t0, t0); /* t0 = [coeff1*(X-Z) - coeff0*(X+Z)]^2 */
+ fp2mul_mont(&Q->X, t2, &Q->X); /* X3final = X*[coeff0*(X+Z) + coeff1*(X-Z)]^2 */
+ fp2mul_mont(&Q->Z, t0, &Q->Z); /* Z3final = Z*[coeff1*(X-Z) - coeff0*(X+Z)]^2 */
+}
+
+/* 3-way simultaneous inversion
+ * Input: z1,z2,z3
+ * Output: 1/z1,1/z2,1/z3 (override inputs). */
+void inv_3_way(f2elm_t *z1, f2elm_t *z2, f2elm_t *z3)
+{
+ f2elm_t _t0, _t1, _t2, _t3;
+ f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2, *t3=&_t3;
+
+ fp2mul_mont(z1, z2, t0); /* t0 = z1*z2 */
+ fp2mul_mont(z3, t0, t1); /* t1 = z1*z2*z3 */
+ fp2inv_mont(t1); /* t1 = 1/(z1*z2*z3) */
+ fp2mul_mont(z3, t1, t2); /* t2 = 1/(z1*z2) */
+ fp2mul_mont(t2, z2, t3); /* t3 = 1/z1 */
+ fp2mul_mont(t2, z1, z2); /* z2 = 1/z2 */
+ fp2mul_mont(t0, t1, z3); /* z3 = 1/z3 */
+ fp2copy(t3, z1); /* z1 = 1/z1 */
+}
+
+/* Given the x-coordinates of P, Q, and R, returns the value A corresponding to the
+ * Montgomery curve E_A: y^2=x^3+A*x^2+x such that R=Q-P on E_A.
+ * Input: the x-coordinates xP, xQ, and xR of the points P, Q and R.
+ * Output: the coefficient A corresponding to the curve E_A: y^2=x^3+A*x^2+x. */
+void get_A(const f2elm_t *xP, const f2elm_t *xQ, const f2elm_t *xR, f2elm_t *A)
+{
+ f2elm_t _t0, _t1, one = {0};
+ f2elm_t *t0=&_t0, *t1=&_t1;
+
+
+ fpcopy((const digit_t*)&Montgomery_one,one.e[0]);
+ fp2add(xP, xQ, t1); /* t1 = xP+xQ */
+ fp2mul_mont(xP, xQ, t0); /* t0 = xP*xQ */
+ fp2mul_mont(xR, t1, A); /* A = xR*t1 */
+ fp2add(t0, A, A); /* A = A+t0 */
+ fp2mul_mont(t0, xR, t0); /* t0 = t0*xR */
+ fp2sub(A, &one, A); /* A = A-1 */
+ fp2add(t0, t0, t0); /* t0 = t0+t0 */
+ fp2add(t1, xR, t1); /* t1 = t1+xR */
+ fp2add(t0, t0, t0); /* t0 = t0+t0 */
+ fp2sqr_mont(A, A); /* A = A^2 */
+ fp2inv_mont(t0); /* t0 = 1/t0 */
+ fp2mul_mont(A, t0, A); /* A = A*t0 */
+ fp2sub(A, t1, A); /* Afinal = A-t1 */
+}
+
+/* Computes the j-invariant of a Montgomery curve with projective constant.
+ * Input: A,C in GF(p^2).
+ * Output: j=256*(A^2-3*C^2)^3/(C^4*(A^2-4*C^2)), which is the j-invariant of the Montgomery curve
+ * B*y^2=x^3+(A/C)*x^2+x or (equivalently) j-invariant of B'*y^2=C*x^3+A*x^2+C*x. */
+void j_inv(const f2elm_t *A, const f2elm_t *C, f2elm_t *jinv)
+{
+ f2elm_t _t0, _t1;
+ f2elm_t *t0=&_t0, *t1=&_t1;
+
+ fp2sqr_mont(A, jinv); /* jinv = A^2 */
+ fp2sqr_mont(C, t1); /* t1 = C^2 */
+ fp2add(t1, t1, t0); /* t0 = t1+t1 */
+ fp2sub(jinv, t0, t0); /* t0 = jinv-t0 */
+ fp2sub(t0, t1, t0); /* t0 = t0-t1 */
+ fp2sub(t0, t1, jinv); /* jinv = t0-t1 */
+ fp2sqr_mont(t1, t1); /* t1 = t1^2 */
+ fp2mul_mont(jinv, t1, jinv); /* jinv = jinv*t1 */
+ fp2add(t0, t0, t0); /* t0 = t0+t0 */
+ fp2add(t0, t0, t0); /* t0 = t0+t0 */
+ fp2sqr_mont(t0, t1); /* t1 = t0^2 */
+ fp2mul_mont(t0, t1, t0); /* t0 = t0*t1 */
+ fp2add(t0, t0, t0); /* t0 = t0+t0 */
+ fp2add(t0, t0, t0); /* t0 = t0+t0 */
+ fp2inv_mont(jinv); /* jinv = 1/jinv */
+ fp2mul_mont(jinv, t0, jinv); /* jinv = t0*jinv */
+}
+
+/* Simultaneous doubling and differential addition.
+ * Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ,
+ * affine difference xPQ=x(P-Q) and Montgomery curve constant A24=(A+2)/4.
+ * Output: projective Montgomery points P <- 2*P = (X2P:Z2P) such that x(2P)=X2P/Z2P,
+ * and Q <- P+Q = (XQP:ZQP) such that x(Q+P)=XQP/ZQP. */
+static void xDBLADD(point_proj_t P, point_proj_t Q, const f2elm_t *xPQ, const f2elm_t *A24)
+{
+ f2elm_t _t0, _t1, _t2;
+ f2elm_t *t0=&_t0, *t1=&_t1, *t2=&_t2;
+
+ mp2_add(&P->X, &P->Z, t0); /* t0 = XP+ZP */
+ mp2_sub_p2(&P->X, &P->Z, t1); /* t1 = XP-ZP */
+ fp2sqr_mont(t0, &P->X); /* XP = (XP+ZP)^2 */
+ mp2_sub_p2(&Q->X, &Q->Z, t2); /* t2 = XQ-ZQ */
+ mp2_add(&Q->X, &Q->Z, &Q->X); /* XQ = XQ+ZQ */
+ fp2mul_mont(t0, t2, t0); /* t0 = (XP+ZP)*(XQ-ZQ) */
+ fp2sqr_mont(t1, &P->Z); /* ZP = (XP-ZP)^2 */
+ fp2mul_mont(t1, &Q->X, t1); /* t1 = (XP-ZP)*(XQ+ZQ) */
+ mp2_sub_p2(&P->X, &P->Z, t2); /* t2 = (XP+ZP)^2-(XP-ZP)^2 */
+ fp2mul_mont(&P->X, &P->Z, &P->X); /* XP = (XP+ZP)^2*(XP-ZP)^2 */
+ fp2mul_mont(A24, t2, &Q->X); /* XQ = A24*[(XP+ZP)^2-(XP-ZP)^2] */
+ mp2_sub_p2(t0, t1, &Q->Z); /* ZQ = (XP+ZP)*(XQ-ZQ)-(XP-ZP)*(XQ+ZQ) */
+ mp2_add(&Q->X, &P->Z, &P->Z); /* ZP = A24*[(XP+ZP)^2-(XP-ZP)^2]+(XP-ZP)^2 */
+ mp2_add(t0, t1, &Q->X); /* XQ = (XP+ZP)*(XQ-ZQ)+(XP-ZP)*(XQ+ZQ) */
+ fp2mul_mont(&P->Z, t2, &P->Z); /* ZP = [A24*[(XP+ZP)^2-(XP-ZP)^2]+(XP-ZP)^2]*[(XP+ZP)^2-(XP-ZP)^2] */
+ fp2sqr_mont(&Q->Z, &Q->Z); /* ZQ = [(XP+ZP)*(XQ-ZQ)-(XP-ZP)*(XQ+ZQ)]^2 */
+ fp2sqr_mont(&Q->X, &Q->X); /* XQ = [(XP+ZP)*(XQ-ZQ)+(XP-ZP)*(XQ+ZQ)]^2 */
+ fp2mul_mont(&Q->Z, xPQ, &Q->Z); /* ZQ = xPQ*[(XP+ZP)*(XQ-ZQ)-(XP-ZP)*(XQ+ZQ)]^2 */
+}
+
+/* Swap points.
+ * If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then P <- Q and Q <- P */
+static void swap_points(point_proj_t P, point_proj_t Q, const digit_t option)
+{
+ unsigned int i;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ digit_t temp = option & (P->X.e[0][i] ^ Q->X.e[0][i]);
+ P->X.e[0][i] = temp ^ P->X.e[0][i];
+ Q->X.e[0][i] = temp ^ Q->X.e[0][i];
+ temp = option & (P->X.e[1][i] ^ Q->X.e[1][i]);
+ P->X.e[1][i] = temp ^ P->X.e[1][i];
+ Q->X.e[1][i] = temp ^ Q->X.e[1][i];
+ temp = option & (P->Z.e[0][i] ^ Q->Z.e[0][i]);
+ P->Z.e[0][i] = temp ^ P->Z.e[0][i];
+ Q->Z.e[0][i] = temp ^ Q->Z.e[0][i];
+ temp = option & (P->Z.e[1][i] ^ Q->Z.e[1][i]);
+ P->Z.e[1][i] = temp ^ P->Z.e[1][i];
+ Q->Z.e[1][i] = temp ^ Q->Z.e[1][i];
+ }
+}
+
+void LADDER3PT(const f2elm_t *xP, const f2elm_t *xQ, const f2elm_t *xPQ, const digit_t* m,
+ const unsigned int AliceOrBob, point_proj_t R, const f2elm_t *A)
+{
+ point_proj_t R0 = {0}, R2 = {0};
+ f2elm_t _A24 = {0};
+ f2elm_t *A24 = &_A24;
+ digit_t mask;
+ int i, nbits, swap, prevbit = 0;
+
+ if (AliceOrBob == S2N_SIKE_P434_R3_ALICE) {
+ nbits = S2N_SIKE_P434_R3_OALICE_BITS;
+ } else {
+ nbits = S2N_SIKE_P434_R3_OBOB_BITS - 1;
+ }
+
+ /* Initializing constant */
+ fpcopy((const digit_t*)&Montgomery_one, A24->e[0]);
+ mp2_add(A24, A24, A24);
+ mp2_add(A, A24, A24);
+ fp2div2(A24, A24);
+ fp2div2(A24, A24); /* A24 = (A+2)/4 */
+
+ /* Initializing points */
+ fp2copy(xQ, &R0->X);
+ fpcopy((const digit_t*)&Montgomery_one, (digit_t*)&R0->Z);
+ fp2copy(xPQ, &R2->X);
+ fpcopy((const digit_t*)&Montgomery_one, (digit_t*)&R2->Z);
+ fp2copy(xP, &R->X);
+ fpcopy((const digit_t*)&Montgomery_one, (digit_t*)&R->Z);
+ fpzero((digit_t*)(R->Z.e)[1]);
+
+ /* Main loop */
+ for (i = 0; i < nbits; i++) {
+ int bit = (m[i >> S2N_SIKE_P434_R3_LOG2RADIX] >> (i & (S2N_SIKE_P434_R3_RADIX-1))) & 1;
+ swap = bit ^ prevbit;
+ prevbit = bit;
+ mask = 0 - (digit_t)swap;
+
+ swap_points(R, R2, mask);
+ xDBLADD(R0, R2, &R->X, A24);
+ fp2mul_mont(&R2->X, &R->Z, &R2->X);
+ }
+ swap = 0 ^ prevbit;
+ mask = 0 - (digit_t)swap;
+ swap_points(R, R2, mask);
+}
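
Editor's note: the ladder code above never branches on the secret scalar. swap_points() turns
the current key bit into an all-zero or all-ones mask and swaps the two working points with
XOR arithmetic, so the memory access pattern is identical for every key. A minimal stand-alone
sketch of the same masking idea (not part of the patch; the function name and word type are
illustrative):

    #include <stdint.h>

    /* Branch-free conditional swap of two words.
     * mask must be 0 (keep) or all ones (swap), e.g. mask = 0 - (uint64_t)bit. */
    static void cswap_word(uint64_t *a, uint64_t *b, uint64_t mask)
    {
        uint64_t t = mask & (*a ^ *b);  /* 0 when mask is 0, a^b when mask is all ones */
        *a ^= t;                        /* a stays a, or becomes the old b */
        *b ^= t;                        /* b stays b, or becomes the old a */
    }

LADDER3PT derives its mask the same way, as mask = 0 - (digit_t)(bit ^ prevbit), so a swap
happens exactly when the current scalar bit differs from the previous one.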
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.h
new file mode 100644
index 0000000000..44245ec726
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_ec_isogeny.h
@@ -0,0 +1,46 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: elliptic curve and isogeny functions
+*********************************************************************************************/
+
+#pragma once
+
+#include "sikep434r3.h"
+
+#define xDBL S2N_SIKE_P434_R3_NAMESPACE(xDBL)
+void xDBL(const point_proj_t P, point_proj_t Q, const f2elm_t *A24plus, const f2elm_t *C24);
+
+#define xDBLe S2N_SIKE_P434_R3_NAMESPACE(xDBLe)
+void xDBLe(const point_proj_t P, point_proj_t Q, const f2elm_t *A24plus, const f2elm_t *C24, const int e);
+
+#define get_4_isog S2N_SIKE_P434_R3_NAMESPACE(get_4_isog)
+void get_4_isog(const point_proj_t P, f2elm_t *A24plus, f2elm_t *C24, f2elm_t *coeff);
+
+#define eval_4_isog S2N_SIKE_P434_R3_NAMESPACE(eval_4_isog)
+void eval_4_isog(point_proj_t P, f2elm_t* coeff);
+
+#define xTPL S2N_SIKE_P434_R3_NAMESPACE(xTPL)
+void xTPL(const point_proj_t P, point_proj_t Q, const f2elm_t *A24minus, const f2elm_t *A24plus);
+
+#define xTPLe S2N_SIKE_P434_R3_NAMESPACE(xTPLe)
+void xTPLe(const point_proj_t P, point_proj_t Q, const f2elm_t *A24minus, const f2elm_t *A24plus, const int e);
+
+#define get_3_isog S2N_SIKE_P434_R3_NAMESPACE(get_3_isog)
+void get_3_isog(const point_proj_t P, f2elm_t *A24minus, f2elm_t *A24plus, f2elm_t *coeff);
+
+#define eval_3_isog S2N_SIKE_P434_R3_NAMESPACE(eval_3_isog)
+void eval_3_isog(point_proj_t Q, const f2elm_t *coeff);
+
+#define inv_3_way S2N_SIKE_P434_R3_NAMESPACE(inv_3_way)
+void inv_3_way(f2elm_t *z1, f2elm_t *z2, f2elm_t *z3);
+
+#define get_A S2N_SIKE_P434_R3_NAMESPACE(get_A)
+void get_A(const f2elm_t *xP, const f2elm_t *xQ, const f2elm_t *xR, f2elm_t *A);
+
+#define j_inv S2N_SIKE_P434_R3_NAMESPACE(j_inv)
+void j_inv(const f2elm_t *A, const f2elm_t *C, f2elm_t *jinv);
+
+#define LADDER3PT S2N_SIKE_P434_R3_NAMESPACE(LADDER3PT)
+void LADDER3PT(const f2elm_t *xP, const f2elm_t *xQ, const f2elm_t *xPQ, const digit_t *m,
+ const unsigned int AliceOrBob, point_proj_t R, const f2elm_t *A);
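
Editor's note: every declaration in this header is first routed through the
S2N_SIKE_P434_R3_NAMESPACE macro, which token-pastes an s2n_sike_p434_r3_ prefix onto the
symbol so this vendored copy of the reference code cannot clash with another SIKE
implementation at link time. A minimal sketch of the pattern (the macro body matches the one
defined in sikep434r3_fp_x64_asm.S below; the function name here is hypothetical):

    /* Token pasting renames every exported symbol. */
    #define S2N_SIKE_P434_R3_NAMESPACE(s) s2n_sike_p434_r3_##s

    #define my_isogeny_helper S2N_SIKE_P434_R3_NAMESPACE(my_isogeny_helper)
    void my_isogeny_helper(void);   /* the linker sees s2n_sike_p434_r3_my_isogeny_helper */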
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.c
new file mode 100644
index 0000000000..413cb2b8e4
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.c
@@ -0,0 +1,417 @@
+/********************************************************************************************
+* SHA3-derived function SHAKE
+*
+* Based on the public domain implementation in crypto_hash/keccakc512/simple/
+* from http://bench.cr.yp.to/supercop.html by Ronny Van Keer
+* and the public domain "TweetFips202" implementation from https://twitter.com/tweetfips202
+* by Gilles Van Assche, Daniel J. Bernstein, and Peter Schwabe
+*
+* See NIST Special Publication 800-185 for more information:
+* http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+*
+*********************************************************************************************/
+
+#include <stdint.h>
+#include <stddef.h>
+#include "sikep434r3.h"
+#include "sikep434r3_fips202.h"
+
+#define NROUNDS 24
+#define ROL(a, offset) ((a << offset) ^ (a >> (64-offset)))
+
+/*************************************************
+ * Name: load64
+ *
+ * Description: Load 8 bytes into uint64_t in little-endian order
+ *
+ * Arguments: - const uint8_t *x: pointer to input byte array
+ *
+ * Returns the loaded 64-bit unsigned integer
+ **************************************************/
+static uint64_t load64(const uint8_t *x) {
+ uint64_t r = 0;
+ for (size_t i = 0; i < 8; ++i) {
+ r |= (uint64_t)x[i] << 8 * i;
+ }
+
+ return r;
+}
+
+/*************************************************
+ * Name: store64
+ *
+ * Description: Store a 64-bit integer to a byte array in little-endian order
+ *
+ * Arguments: - uint8_t *x: pointer to the output byte array
+ * - uint64_t u: input 64-bit unsigned integer
+ **************************************************/
+static void store64(uint8_t *x, uint64_t u) {
+ for (size_t i = 0; i < 8; ++i) {
+ x[i] = (uint8_t) (u >> 8 * i);
+ }
+}
+
+static const uint64_t KeccakF_RoundConstants[NROUNDS] = {
+ (uint64_t)0x0000000000000001ULL,
+ (uint64_t)0x0000000000008082ULL,
+ (uint64_t)0x800000000000808aULL,
+ (uint64_t)0x8000000080008000ULL,
+ (uint64_t)0x000000000000808bULL,
+ (uint64_t)0x0000000080000001ULL,
+ (uint64_t)0x8000000080008081ULL,
+ (uint64_t)0x8000000000008009ULL,
+ (uint64_t)0x000000000000008aULL,
+ (uint64_t)0x0000000000000088ULL,
+ (uint64_t)0x0000000080008009ULL,
+ (uint64_t)0x000000008000000aULL,
+ (uint64_t)0x000000008000808bULL,
+ (uint64_t)0x800000000000008bULL,
+ (uint64_t)0x8000000000008089ULL,
+ (uint64_t)0x8000000000008003ULL,
+ (uint64_t)0x8000000000008002ULL,
+ (uint64_t)0x8000000000000080ULL,
+ (uint64_t)0x000000000000800aULL,
+ (uint64_t)0x800000008000000aULL,
+ (uint64_t)0x8000000080008081ULL,
+ (uint64_t)0x8000000000008080ULL,
+ (uint64_t)0x0000000080000001ULL,
+ (uint64_t)0x8000000080008008ULL,
+};
+
+static void KeccakF1600_StatePermute(uint64_t * state)
+{
+ int round;
+ uint64_t Aba, Abe, Abi, Abo, Abu;
+ uint64_t Aga, Age, Agi, Ago, Agu;
+ uint64_t Aka, Ake, Aki, Ako, Aku;
+ uint64_t Ama, Ame, Ami, Amo, Amu;
+ uint64_t Asa, Ase, Asi, Aso, Asu;
+
+ /* copyFromState(A, state) */
+ Aba = state[ 0];
+ Abe = state[ 1];
+ Abi = state[ 2];
+ Abo = state[ 3];
+ Abu = state[ 4];
+ Aga = state[ 5];
+ Age = state[ 6];
+ Agi = state[ 7];
+ Ago = state[ 8];
+ Agu = state[ 9];
+ Aka = state[10];
+ Ake = state[11];
+ Aki = state[12];
+ Ako = state[13];
+ Aku = state[14];
+ Ama = state[15];
+ Ame = state[16];
+ Ami = state[17];
+ Amo = state[18];
+ Amu = state[19];
+ Asa = state[20];
+ Ase = state[21];
+ Asi = state[22];
+ Aso = state[23];
+ Asu = state[24];
+
+ for( round = 0; round < NROUNDS; round += 2 ) {
+ uint64_t BCa, BCe, BCi, BCo, BCu;
+ uint64_t Da, De, Di, Do, Du;
+ uint64_t Eba, Ebe, Ebi, Ebo, Ebu;
+ uint64_t Ega, Ege, Egi, Ego, Egu;
+ uint64_t Eka, Eke, Eki, Eko, Eku;
+ uint64_t Ema, Eme, Emi, Emo, Emu;
+ uint64_t Esa, Ese, Esi, Eso, Esu;
+
+ /* prepareTheta */
+ BCa = Aba^Aga^Aka^Ama^Asa;
+ BCe = Abe^Age^Ake^Ame^Ase;
+ BCi = Abi^Agi^Aki^Ami^Asi;
+ BCo = Abo^Ago^Ako^Amo^Aso;
+ BCu = Abu^Agu^Aku^Amu^Asu;
+
+ /* thetaRhoPiChiIotaPrepareTheta(round , A, E) */
+ Da = BCu^ROL(BCe, 1);
+ De = BCa^ROL(BCi, 1);
+ Di = BCe^ROL(BCo, 1);
+ Do = BCi^ROL(BCu, 1);
+ Du = BCo^ROL(BCa, 1);
+
+ Aba ^= Da;
+ BCa = Aba;
+ Age ^= De;
+ BCe = ROL(Age, 44);
+ Aki ^= Di;
+ BCi = ROL(Aki, 43);
+ Amo ^= Do;
+ BCo = ROL(Amo, 21);
+ Asu ^= Du;
+ BCu = ROL(Asu, 14);
+ Eba = BCa ^((~BCe)& BCi );
+ Eba ^= (uint64_t)KeccakF_RoundConstants[round];
+ Ebe = BCe ^((~BCi)& BCo );
+ Ebi = BCi ^((~BCo)& BCu );
+ Ebo = BCo ^((~BCu)& BCa );
+ Ebu = BCu ^((~BCa)& BCe );
+
+ Abo ^= Do;
+ BCa = ROL(Abo, 28);
+ Agu ^= Du;
+ BCe = ROL(Agu, 20);
+ Aka ^= Da;
+ BCi = ROL(Aka, 3);
+ Ame ^= De;
+ BCo = ROL(Ame, 45);
+ Asi ^= Di;
+ BCu = ROL(Asi, 61);
+ Ega = BCa ^((~BCe)& BCi );
+ Ege = BCe ^((~BCi)& BCo );
+ Egi = BCi ^((~BCo)& BCu );
+ Ego = BCo ^((~BCu)& BCa );
+ Egu = BCu ^((~BCa)& BCe );
+
+ Abe ^= De;
+ BCa = ROL(Abe, 1);
+ Agi ^= Di;
+ BCe = ROL(Agi, 6);
+ Ako ^= Do;
+ BCi = ROL(Ako, 25);
+ Amu ^= Du;
+ BCo = ROL(Amu, 8);
+ Asa ^= Da;
+ BCu = ROL(Asa, 18);
+ Eka = BCa ^((~BCe)& BCi );
+ Eke = BCe ^((~BCi)& BCo );
+ Eki = BCi ^((~BCo)& BCu );
+ Eko = BCo ^((~BCu)& BCa );
+ Eku = BCu ^((~BCa)& BCe );
+
+ Abu ^= Du;
+ BCa = ROL(Abu, 27);
+ Aga ^= Da;
+ BCe = ROL(Aga, 36);
+ Ake ^= De;
+ BCi = ROL(Ake, 10);
+ Ami ^= Di;
+ BCo = ROL(Ami, 15);
+ Aso ^= Do;
+ BCu = ROL(Aso, 56);
+ Ema = BCa ^((~BCe)& BCi );
+ Eme = BCe ^((~BCi)& BCo );
+ Emi = BCi ^((~BCo)& BCu );
+ Emo = BCo ^((~BCu)& BCa );
+ Emu = BCu ^((~BCa)& BCe );
+
+ Abi ^= Di;
+ BCa = ROL(Abi, 62);
+ Ago ^= Do;
+ BCe = ROL(Ago, 55);
+ Aku ^= Du;
+ BCi = ROL(Aku, 39);
+ Ama ^= Da;
+ BCo = ROL(Ama, 41);
+ Ase ^= De;
+ BCu = ROL(Ase, 2);
+ Esa = BCa ^((~BCe)& BCi );
+ Ese = BCe ^((~BCi)& BCo );
+ Esi = BCi ^((~BCo)& BCu );
+ Eso = BCo ^((~BCu)& BCa );
+ Esu = BCu ^((~BCa)& BCe );
+
+ /* prepareTheta */
+ BCa = Eba^Ega^Eka^Ema^Esa;
+ BCe = Ebe^Ege^Eke^Eme^Ese;
+ BCi = Ebi^Egi^Eki^Emi^Esi;
+ BCo = Ebo^Ego^Eko^Emo^Eso;
+ BCu = Ebu^Egu^Eku^Emu^Esu;
+
+ /* thetaRhoPiChiIotaPrepareTheta(round+1, E, A) */
+ Da = BCu^ROL(BCe, 1);
+ De = BCa^ROL(BCi, 1);
+ Di = BCe^ROL(BCo, 1);
+ Do = BCi^ROL(BCu, 1);
+ Du = BCo^ROL(BCa, 1);
+
+ Eba ^= Da;
+ BCa = Eba;
+ Ege ^= De;
+ BCe = ROL(Ege, 44);
+ Eki ^= Di;
+ BCi = ROL(Eki, 43);
+ Emo ^= Do;
+ BCo = ROL(Emo, 21);
+ Esu ^= Du;
+ BCu = ROL(Esu, 14);
+ Aba = BCa ^((~BCe)& BCi );
+ Aba ^= (uint64_t)KeccakF_RoundConstants[round+1];
+ Abe = BCe ^((~BCi)& BCo );
+ Abi = BCi ^((~BCo)& BCu );
+ Abo = BCo ^((~BCu)& BCa );
+ Abu = BCu ^((~BCa)& BCe );
+
+ Ebo ^= Do;
+ BCa = ROL(Ebo, 28);
+ Egu ^= Du;
+ BCe = ROL(Egu, 20);
+ Eka ^= Da;
+ BCi = ROL(Eka, 3);
+ Eme ^= De;
+ BCo = ROL(Eme, 45);
+ Esi ^= Di;
+ BCu = ROL(Esi, 61);
+ Aga = BCa ^((~BCe)& BCi );
+ Age = BCe ^((~BCi)& BCo );
+ Agi = BCi ^((~BCo)& BCu );
+ Ago = BCo ^((~BCu)& BCa );
+ Agu = BCu ^((~BCa)& BCe );
+
+ Ebe ^= De;
+ BCa = ROL(Ebe, 1);
+ Egi ^= Di;
+ BCe = ROL(Egi, 6);
+ Eko ^= Do;
+ BCi = ROL(Eko, 25);
+ Emu ^= Du;
+ BCo = ROL(Emu, 8);
+ Esa ^= Da;
+ BCu = ROL(Esa, 18);
+ Aka = BCa ^((~BCe)& BCi );
+ Ake = BCe ^((~BCi)& BCo );
+ Aki = BCi ^((~BCo)& BCu );
+ Ako = BCo ^((~BCu)& BCa );
+ Aku = BCu ^((~BCa)& BCe );
+
+ Ebu ^= Du;
+ BCa = ROL(Ebu, 27);
+ Ega ^= Da;
+ BCe = ROL(Ega, 36);
+ Eke ^= De;
+ BCi = ROL(Eke, 10);
+ Emi ^= Di;
+ BCo = ROL(Emi, 15);
+ Eso ^= Do;
+ BCu = ROL(Eso, 56);
+ Ama = BCa ^((~BCe)& BCi );
+ Ame = BCe ^((~BCi)& BCo );
+ Ami = BCi ^((~BCo)& BCu );
+ Amo = BCo ^((~BCu)& BCa );
+ Amu = BCu ^((~BCa)& BCe );
+
+ Ebi ^= Di;
+ BCa = ROL(Ebi, 62);
+ Ego ^= Do;
+ BCe = ROL(Ego, 55);
+ Eku ^= Du;
+ BCi = ROL(Eku, 39);
+ Ema ^= Da;
+ BCo = ROL(Ema, 41);
+ Ese ^= De;
+ BCu = ROL(Ese, 2);
+ Asa = BCa ^((~BCe)& BCi );
+ Ase = BCe ^((~BCi)& BCo );
+ Asi = BCi ^((~BCo)& BCu );
+ Aso = BCo ^((~BCu)& BCa );
+ Asu = BCu ^((~BCa)& BCe );
+ }
+
+ /* copyToState(state, A) */
+ state[ 0] = Aba;
+ state[ 1] = Abe;
+ state[ 2] = Abi;
+ state[ 3] = Abo;
+ state[ 4] = Abu;
+ state[ 5] = Aga;
+ state[ 6] = Age;
+ state[ 7] = Agi;
+ state[ 8] = Ago;
+ state[ 9] = Agu;
+ state[10] = Aka;
+ state[11] = Ake;
+ state[12] = Aki;
+ state[13] = Ako;
+ state[14] = Aku;
+ state[15] = Ama;
+ state[16] = Ame;
+ state[17] = Ami;
+ state[18] = Amo;
+ state[19] = Amu;
+ state[20] = Asa;
+ state[21] = Ase;
+ state[22] = Asi;
+ state[23] = Aso;
+ state[24] = Asu;
+}
+
+static void keccak_absorb(uint64_t *s, unsigned int r, const unsigned char *m, unsigned long long int mlen,
+ unsigned char p)
+{
+ unsigned long long i;
+ unsigned char t[200];
+
+ while (mlen >= r) {
+ for (i = 0; i < r / 8; ++i)
+ s[i] ^= load64(m + 8 * i);
+
+ KeccakF1600_StatePermute(s);
+ mlen -= r;
+ m += r;
+ }
+
+ for (i = 0; i < r; ++i) {
+ t[i] = 0;
+ }
+ for (i = 0; i < mlen; ++i) {
+ t[i] = m[i];
+ }
+
+ t[i] = p;
+ t[r - 1] |= 128;
+
+ for (i = 0; i < r / 8; ++i) {
+ s[i] ^= load64(t + 8 * i);
+ }
+}
+
+static void keccak_squeezeblocks(unsigned char *h, unsigned long long int nblocks, uint64_t *s, unsigned int r)
+{
+ unsigned int i;
+
+ while(nblocks > 0) {
+ KeccakF1600_StatePermute(s);
+ for (i = 0; i < (r>>3); i++) {
+ store64(h+8*i, s[i]);
+ }
+
+ h += r;
+ nblocks--;
+ }
+}
+
+void shake256(unsigned char *output, unsigned long long outlen, const unsigned char *input, unsigned long long inlen)
+{
+ uint64_t s[25];
+ unsigned char t[SHAKE256_RATE];
+ unsigned long long nblocks = outlen / SHAKE256_RATE;
+ size_t i;
+
+ for (i = 0; i < 25; ++i) {
+ s[i] = 0;
+ }
+
+ /* Absorb input */
+ keccak_absorb(s, SHAKE256_RATE, input, inlen, 0x1F);
+
+ /* Squeeze output */
+ keccak_squeezeblocks(output, nblocks, s, SHAKE256_RATE);
+
+ output += nblocks * SHAKE256_RATE;
+ outlen -= nblocks * SHAKE256_RATE;
+
+ if (outlen) {
+ keccak_squeezeblocks(t, 1, s, SHAKE256_RATE);
+
+ for (i = 0; i < outlen; i++) {
+ output[i] = t[i];
+ }
+ }
+}
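
Editor's note: shake256() above is a one-shot sponge. keccak_absorb() XORs the input into the
state in SHAKE256_RATE-byte (136-byte) blocks, appends the SHAKE domain-separation byte 0x1F
and the final padding bit, and keccak_squeezeblocks() then permutes and reads out blocks until
outlen bytes have been produced. A minimal usage sketch (stand-alone; the caller and output
size are hypothetical):

    /* Prototype as declared in sikep434r3_fips202.h (the real symbol is namespaced). */
    void shake256(unsigned char *output, unsigned long long outlen,
                  const unsigned char *input, unsigned long long inlen);

    /* Hypothetical caller: derive 64 bytes from an arbitrary-length seed. */
    void derive_bytes(unsigned char out[64], const unsigned char *seed, unsigned long long seed_len)
    {
        shake256(out, 64, seed, seed_len);   /* absorb the seed, then squeeze 64 bytes */
    }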
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.h
new file mode 100644
index 0000000000..9dd237a491
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fips202.h
@@ -0,0 +1,23 @@
+/********************************************************************************************
+* SHA3-derived function SHAKE
+*
+* Based on the public domain implementation in crypto_hash/keccakc512/simple/
+* from http://bench.cr.yp.to/supercop.html by Ronny Van Keer
+* and the public domain "TweetFips202" implementation from https://twitter.com/tweetfips202
+* by Gilles Van Assche, Daniel J. Bernstein, and Peter Schwabe
+*
+* See NIST Special Publication 800-185 for more information:
+* http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+*
+*********************************************************************************************/
+
+#pragma once
+
+#include <stdint.h>
+#include "sikep434r3.h"
+
+#define SHAKE128_RATE 168
+#define SHAKE256_RATE 136
+
+#define shake256 S2N_SIKE_P434_R3_NAMESPACE(shake256)
+void shake256(unsigned char *output, unsigned long long outlen, const unsigned char *input, unsigned long long inlen);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.c
new file mode 100644
index 0000000000..867ac0f6c1
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.c
@@ -0,0 +1,297 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: modular arithmetic for P434
+*********************************************************************************************/
+
+#include "sikep434r3.h"
+#include "pq-crypto/s2n_pq.h"
+#include "sikep434r3_fp.h"
+#include "sikep434r3_fpx.h"
+#include "sikep434r3_fp_x64_asm.h"
+
+/* Multiprecision subtraction with correction with 2*p, c = a-b+2p. */
+void mp_sub434_p2(const digit_t* a, const digit_t* b, digit_t* c)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ mp_sub434_p2_asm(a, b, c);
+ return;
+ }
+#endif
+
+ unsigned int i, borrow = 0;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_SUBC(borrow, a[i], b[i], borrow, c[i]);
+ }
+
+ borrow = 0;
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_ADDC(borrow, c[i], ((const digit_t*)p434x2)[i], borrow, c[i]);
+ }
+}
+
+/* Multiprecision subtraction with correction with 4*p, c = a-b+4p. */
+void mp_sub434_p4(const digit_t* a, const digit_t* b, digit_t* c)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ mp_sub434_p4_asm(a, b, c);
+ return;
+ }
+#endif
+
+ unsigned int i, borrow = 0;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_SUBC(borrow, a[i], b[i], borrow, c[i]);
+ }
+
+ borrow = 0;
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_ADDC(borrow, c[i], ((const digit_t*)p434x4)[i], borrow, c[i]);
+ }
+}
+
+/* Modular addition, c = a+b mod p434.
+ * Inputs: a, b in [0, 2*p434-1]
+ * Output: c in [0, 2*p434-1] */
+void fpadd434(const digit_t* a, const digit_t* b, digit_t* c)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ fpadd434_asm(a, b, c);
+ return;
+ }
+#endif
+ unsigned int i, carry = 0;
+ digit_t mask;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_ADDC(carry, a[i], b[i], carry, c[i]);
+ }
+
+ carry = 0;
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_SUBC(carry, c[i], ((const digit_t*)p434x2)[i], carry, c[i]);
+ }
+ mask = 0 - (digit_t)carry;
+
+ carry = 0;
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_ADDC(carry, c[i], ((const digit_t*)p434x2)[i] & mask, carry, c[i]);
+ }
+}
+
+/* Modular subtraction, c = a-b mod p434.
+ * Inputs: a, b in [0, 2*p434-1]
+ * Output: c in [0, 2*p434-1] */
+void fpsub434(const digit_t* a, const digit_t* b, digit_t* c)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ fpsub434_asm(a, b, c);
+ return;
+ }
+#endif
+
+ unsigned int i, borrow = 0;
+ digit_t mask;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_SUBC(borrow, a[i], b[i], borrow, c[i]);
+ }
+ mask = 0 - (digit_t)borrow;
+
+ borrow = 0;
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_ADDC(borrow, c[i], ((const digit_t*)p434x2)[i] & mask, borrow, c[i]);
+ }
+}
+
+/* Modular negation, a = -a mod p434.
+ * Input/output: a in [0, 2*p434-1] */
+void fpneg434(digit_t* a)
+{
+ unsigned int i, borrow = 0;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_SUBC(borrow, ((const digit_t*)p434x2)[i], a[i], borrow, a[i]);
+ }
+}
+
+/* Modular division by two, c = a/2 mod p434.
+ * Input : a in [0, 2*p434-1]
+ * Output: c in [0, 2*p434-1] */
+void fpdiv2_434(const digit_t* a, digit_t* c)
+{
+ unsigned int i, carry = 0;
+ digit_t mask;
+
+ mask = 0 - (digit_t)(a[0] & 1); /* If a is odd compute a+p434 */
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_ADDC(carry, a[i], ((const digit_t*)p434)[i] & mask, carry, c[i]);
+ }
+
+ mp_shiftr1(c, S2N_SIKE_P434_R3_NWORDS_FIELD);
+}
+
+/* Modular correction to reduce field element a in [0, 2*p434-1] to [0, p434-1]. */
+void fpcorrection434(digit_t* a)
+{
+ unsigned int i, borrow = 0;
+ digit_t mask;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_SUBC(borrow, a[i], ((const digit_t*)p434)[i], borrow, a[i]);
+ }
+ mask = 0 - (digit_t)borrow;
+
+ borrow = 0;
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ S2N_SIKE_P434_R3_ADDC(borrow, a[i], ((const digit_t*)p434)[i] & mask, borrow, a[i]);
+ }
+}
+
+/* Digit multiplication, digit * digit -> 2-digit result */
+void digit_x_digit(const digit_t a, const digit_t b, digit_t* c)
+{
+ register digit_t al, ah, bl, bh, temp;
+ digit_t albl, albh, ahbl, ahbh, res1, res2, res3, carry;
+ digit_t mask_low = (digit_t)(-1) >> (sizeof(digit_t)*4), mask_high = (digit_t)(-1) << (sizeof(digit_t)*4);
+
+ al = a & mask_low; /* Low part */
+ ah = a >> (sizeof(digit_t) * 4); /* High part */
+ bl = b & mask_low;
+ bh = b >> (sizeof(digit_t) * 4);
+
+ albl = al*bl;
+ albh = al*bh;
+ ahbl = ah*bl;
+ ahbh = ah*bh;
+ c[0] = albl & mask_low; /* C00 */
+
+ res1 = albl >> (sizeof(digit_t) * 4);
+ res2 = ahbl & mask_low;
+ res3 = albh & mask_low;
+ temp = res1 + res2 + res3;
+ carry = temp >> (sizeof(digit_t) * 4);
+ c[0] ^= temp << (sizeof(digit_t) * 4); /* C01 */
+
+ res1 = ahbl >> (sizeof(digit_t) * 4);
+ res2 = albh >> (sizeof(digit_t) * 4);
+ res3 = ahbh & mask_low;
+ temp = res1 + res2 + res3 + carry;
+ c[1] = temp & mask_low; /* C10 */
+ carry = temp & mask_high;
+ c[1] ^= (ahbh & mask_high) + carry; /* C11 */
+}
+
+/* Multiprecision comba multiply, c = a*b, where lng(a) = lng(b) = nwords. */
+void mp_mul(const digit_t* a, const digit_t* b, digit_t* c, const unsigned int nwords)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ S2N_SIKE_P434_R3_UNREFERENCED_PARAMETER(nwords);
+ mul434_asm(a, b, c);
+ return;
+ }
+#endif
+
+ unsigned int i, j;
+ digit_t t = 0, u = 0, v = 0, UV[2];
+ unsigned int carry;
+
+ for (i = 0; i < nwords; i++) {
+ for (j = 0; j <= i; j++) {
+ S2N_SIKE_P434_R3_MUL(a[j], b[i-j], UV+1, UV[0]);
+ S2N_SIKE_P434_R3_ADDC(0, UV[0], v, carry, v);
+ S2N_SIKE_P434_R3_ADDC(carry, UV[1], u, carry, u);
+ t += carry;
+ }
+ c[i] = v;
+ v = u;
+ u = t;
+ t = 0;
+ }
+
+ for (i = nwords; i < 2*nwords-1; i++) {
+ for (j = i-nwords+1; j < nwords; j++) {
+ S2N_SIKE_P434_R3_MUL(a[j], b[i-j], UV+1, UV[0]);
+ S2N_SIKE_P434_R3_ADDC(0, UV[0], v, carry, v);
+ S2N_SIKE_P434_R3_ADDC(carry, UV[1], u, carry, u);
+ t += carry;
+ }
+ c[i] = v;
+ v = u;
+ u = t;
+ t = 0;
+ }
+ c[2*nwords-1] = v;
+}
+
+/* Efficient Montgomery reduction using comba and exploiting the special form of the prime p434.
+ * mc = ma*R^-1 mod p434x2, where R = 2^448.
+ * If ma < 2^448*p434, the output mc is in the range [0, 2*p434-1].
+ * ma is assumed to be in Montgomery representation. */
+void rdc_mont(digit_t* ma, digit_t* mc)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ rdc434_asm(ma, mc);
+ return;
+ }
+#endif
+
+ unsigned int i, j, carry, count = S2N_SIKE_P434_R3_ZERO_WORDS;
+ digit_t UV[2], t = 0, u = 0, v = 0;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ mc[i] = 0;
+ }
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ for (j = 0; j < i; j++) {
+ if (j < (i-S2N_SIKE_P434_R3_ZERO_WORDS+1)) {
+ S2N_SIKE_P434_R3_MUL(mc[j], ((const digit_t*)p434p1)[i-j], UV+1, UV[0]);
+ S2N_SIKE_P434_R3_ADDC(0, UV[0], v, carry, v);
+ S2N_SIKE_P434_R3_ADDC(carry, UV[1], u, carry, u);
+ t += carry;
+ }
+ }
+ S2N_SIKE_P434_R3_ADDC(0, v, ma[i], carry, v);
+ S2N_SIKE_P434_R3_ADDC(carry, u, 0, carry, u);
+ t += carry;
+ mc[i] = v;
+ v = u;
+ u = t;
+ t = 0;
+ }
+
+ for (i = S2N_SIKE_P434_R3_NWORDS_FIELD; i < 2*S2N_SIKE_P434_R3_NWORDS_FIELD-1; i++) {
+ if (count > 0) {
+ count -= 1;
+ }
+ for (j = i-S2N_SIKE_P434_R3_NWORDS_FIELD+1; j < S2N_SIKE_P434_R3_NWORDS_FIELD; j++) {
+ if (j < (S2N_SIKE_P434_R3_NWORDS_FIELD-count)) {
+ S2N_SIKE_P434_R3_MUL(mc[j], ((const digit_t*)p434p1)[i-j], UV+1, UV[0]);
+ S2N_SIKE_P434_R3_ADDC(0, UV[0], v, carry, v);
+ S2N_SIKE_P434_R3_ADDC(carry, UV[1], u, carry, u);
+ t += carry;
+ }
+ }
+ S2N_SIKE_P434_R3_ADDC(0, v, ma[i], carry, v);
+ S2N_SIKE_P434_R3_ADDC(carry, u, 0, carry, u);
+ t += carry;
+ mc[i-S2N_SIKE_P434_R3_NWORDS_FIELD] = v;
+ v = u;
+ u = t;
+ t = 0;
+ }
+
+ /* `carry` isn't read after this, but it's still a necessary argument to the macro */
+ /* cppcheck-suppress unreadVariable */
+ S2N_SIKE_P434_R3_ADDC(0, v, ma[2*S2N_SIKE_P434_R3_NWORDS_FIELD-1], carry, v);
+ mc[S2N_SIKE_P434_R3_NWORDS_FIELD-1] = v;
+}
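
Editor's note: the field routines above avoid secret-dependent branches. fpadd434(), fpsub434()
and fpcorrection434() all perform the trial subtraction (or addition) unconditionally, turn the
final borrow into an all-zero or all-ones mask via mask = 0 - (digit_t)borrow, and add the
modulus back ANDed with that mask. A reduced single-word sketch of the same correction pattern
(illustrative only; the real field element spans S2N_SIKE_P434_R3_NWORDS_FIELD words):

    #include <stdint.h>

    /* Constant-time a - b mod p for single-word a, b in [0, p).
     * Mirrors the borrow-and-mask correction used by fpsub434() above. */
    static uint64_t submod_word(uint64_t a, uint64_t b, uint64_t p)
    {
        uint64_t d = a - b;           /* wraps modulo 2^64 when a < b */
        uint64_t borrow = (a < b);    /* 1 on underflow, 0 otherwise */
        uint64_t mask = 0 - borrow;   /* all ones on underflow, else 0 */
        return d + (p & mask);        /* add p back only when we underflowed */
    }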
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.h
new file mode 100644
index 0000000000..7844ba0457
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp.h
@@ -0,0 +1,39 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: modular arithmetic for P434
+*********************************************************************************************/
+
+#pragma once
+
+#include "sikep434r3.h"
+
+#define mp_sub434_p2 S2N_SIKE_P434_R3_NAMESPACE(mp_sub434_p2)
+void mp_sub434_p2(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define mp_sub434_p4 S2N_SIKE_P434_R3_NAMESPACE(mp_sub434_p4)
+void mp_sub434_p4(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define fpadd434 S2N_SIKE_P434_R3_NAMESPACE(fpadd434)
+void fpadd434(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define fpsub434 S2N_SIKE_P434_R3_NAMESPACE(fpsub434)
+void fpsub434(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define fpneg434 S2N_SIKE_P434_R3_NAMESPACE(fpneg434)
+void fpneg434(digit_t* a);
+
+#define fpdiv2_434 S2N_SIKE_P434_R3_NAMESPACE(fpdiv2_434)
+void fpdiv2_434(const digit_t* a, digit_t* c);
+
+#define fpcorrection434 S2N_SIKE_P434_R3_NAMESPACE(fpcorrection434)
+void fpcorrection434(digit_t* a);
+
+#define digit_x_digit S2N_SIKE_P434_R3_NAMESPACE(digit_x_digit)
+void digit_x_digit(const digit_t a, const digit_t b, digit_t* c);
+
+#define mp_mul S2N_SIKE_P434_R3_NAMESPACE(mp_mul)
+void mp_mul(const digit_t* a, const digit_t* b, digit_t* c, const unsigned int nwords);
+
+#define rdc_mont S2N_SIKE_P434_R3_NAMESPACE(rdc_mont)
+void rdc_mont(digit_t* ma, digit_t* mc);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.S b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.S
new file mode 100644
index 0000000000..1814a8b25a
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.S
@@ -0,0 +1,1054 @@
+//*******************************************************************************************
+// Supersingular Isogeny Key Encapsulation Library
+//
+// Abstract: field arithmetic in x64 assembly for P434 on Linux
+//*******************************************************************************************
+
+/* Requires the BMI2 instruction set for mulx; ADX instructions are optional but preferred. */
+.intel_syntax noprefix
+
+#define S2N_SIKE_P434_R3_NAMESPACE(s) s2n_sike_p434_r3_##s
+
+// Registers that are used for parameter passing:
+#define reg_p1 rdi
+#define reg_p2 rsi
+#define reg_p3 rdx
+
+// Define addition instructions
+#ifdef S2N_ADX
+
+#define ADD1 adox
+#define ADC1 adox
+#define ADD2 adcx
+#define ADC2 adcx
+
+#else
+
+#define ADD1 add
+#define ADC1 adc
+#define ADD2 add
+#define ADC2 adc
+
+#endif
+
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+.text
+
+#define asm_p434 S2N_SIKE_P434_R3_NAMESPACE(asm_p434)
+.align 32
+.type asm_p434, @object
+.size asm_p434, 56
+asm_p434:
+.quad -1
+.quad -1
+.quad -1
+.quad -161717841442111489
+.quad 8918917783347572387
+.quad 7853257225132122198
+.quad 620258357900100
+
+
+#define asm_p434x2 S2N_SIKE_P434_R3_NAMESPACE(asm_p434x2)
+.align 32
+.type asm_p434x2, @object
+.size asm_p434x2, 56
+asm_p434x2:
+.quad -2
+.quad -1
+.quad -1
+.quad -323435682884222977
+.quad -608908507014406841
+.quad -2740229623445307220
+.quad 1240516715800200
+
+
+#define asm_p434x4 S2N_SIKE_P434_R3_NAMESPACE(asm_p434x4)
+.align 32
+.type asm_p434x4, @object
+.size asm_p434x4, 56
+asm_p434x4:
+.quad -4
+.quad -1
+.quad -1
+.quad -646871365768445953
+.quad -1217817014028813681
+.quad -5480459246890614439
+.quad 2481033431600401
+
+
+#define asm_p434p1 S2N_SIKE_P434_R3_NAMESPACE(asm_p434p1)
+.align 32
+.type asm_p434p1, @object
+.size asm_p434p1, 56
+asm_p434p1:
+.quad 0
+.quad 0
+.quad 0
+.quad -161717841442111488
+.quad 8918917783347572387
+.quad 7853257225132122198
+.quad 620258357900100
+
+//***********************************************************************
+// Field addition
+// Operation: c [reg_p3] = a [reg_p1] + b [reg_p2]
+//***********************************************************************
+#define fpadd434_asm S2N_SIKE_P434_R3_NAMESPACE(fpadd434_asm)
+.global fpadd434_asm
+fpadd434_asm:
+ push r12
+ push r13
+ push r14
+ push r15
+ push rbx
+ push rbp
+
+ xor rax, rax
+ mov r8, [reg_p1]
+ mov r9, [reg_p1+8]
+ mov r10, [reg_p1+16]
+ mov r11, [reg_p1+24]
+ mov r12, [reg_p1+32]
+ mov r13, [reg_p1+40]
+ mov r14, [reg_p1+48]
+ add r8, [reg_p2]
+ adc r9, [reg_p2+8]
+ adc r10, [reg_p2+16]
+ adc r11, [reg_p2+24]
+ adc r12, [reg_p2+32]
+ adc r13, [reg_p2+40]
+ adc r14, [reg_p2+48]
+
+ mov rbx, [rip+asm_p434x2]
+ sub r8, rbx
+ mov rcx, [rip+asm_p434x2+8]
+ sbb r9, rcx
+ sbb r10, rcx
+ mov rdi, [rip+asm_p434x2+24]
+ sbb r11, rdi
+ mov rsi, [rip+asm_p434x2+32]
+ sbb r12, rsi
+ mov rbp, [rip+asm_p434x2+40]
+ sbb r13, rbp
+ mov r15, [rip+asm_p434x2+48]
+ sbb r14, r15
+ sbb rax, 0
+
+ and rbx, rax
+ and rcx, rax
+ and rdi, rax
+ and rsi, rax
+ and rbp, rax
+ and r15, rax
+
+ add r8, rbx
+ adc r9, rcx
+ adc r10, rcx
+ adc r11, rdi
+ adc r12, rsi
+ adc r13, rbp
+ adc r14, r15
+ mov [reg_p3], r8
+ mov [reg_p3+8], r9
+ mov [reg_p3+16], r10
+ mov [reg_p3+24], r11
+ mov [reg_p3+32], r12
+ mov [reg_p3+40], r13
+ mov [reg_p3+48], r14
+
+ pop rbp
+ pop rbx
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ ret
+
+//***********************************************************************
+// Field subtraction
+// Operation: c [reg_p3] = a [reg_p1] - b [reg_p2]
+//***********************************************************************
+#define fpsub434_asm S2N_SIKE_P434_R3_NAMESPACE(fpsub434_asm)
+.global fpsub434_asm
+fpsub434_asm:
+ push r12
+ push r13
+ push r14
+
+ xor rax, rax
+ mov r8, [reg_p1]
+ mov r9, [reg_p1+8]
+ mov r10, [reg_p1+16]
+ mov r11, [reg_p1+24]
+ mov r12, [reg_p1+32]
+ mov r13, [reg_p1+40]
+ mov r14, [reg_p1+48]
+ sub r8, [reg_p2]
+ sbb r9, [reg_p2+8]
+ sbb r10, [reg_p2+16]
+ sbb r11, [reg_p2+24]
+ sbb r12, [reg_p2+32]
+ sbb r13, [reg_p2+40]
+ sbb r14, [reg_p2+48]
+ sbb rax, 0
+
+ mov rcx, [rip+asm_p434x2]
+ mov rdi, [rip+asm_p434x2+8]
+ mov rsi, [rip+asm_p434x2+24]
+ and rcx, rax
+ and rdi, rax
+ and rsi, rax
+ add r8, rcx
+ adc r9, rdi
+ adc r10, rdi
+ adc r11, rsi
+ mov [reg_p3], r8
+ mov [reg_p3+8], r9
+ mov [reg_p3+16], r10
+ mov [reg_p3+24], r11
+ setc cl
+
+ mov r8, [rip+asm_p434x2+32]
+ mov rdi, [rip+asm_p434x2+40]
+ mov rsi, [rip+asm_p434x2+48]
+ and r8, rax
+ and rdi, rax
+ and rsi, rax
+ bt rcx, 0
+ adc r12, r8
+ adc r13, rdi
+ adc r14, rsi
+ mov [reg_p3+32], r12
+ mov [reg_p3+40], r13
+ mov [reg_p3+48], r14
+
+ pop r14
+ pop r13
+ pop r12
+ ret
+
+///////////////////////////////////////////////////////////////// MACRO
+.macro SUB434_PX P0
+ push r12
+ push r13
+
+ mov r8, [reg_p1]
+ mov r9, [reg_p1+8]
+ mov r10, [reg_p1+16]
+ mov r11, [reg_p1+24]
+ mov r12, [reg_p1+32]
+ mov r13, [reg_p1+40]
+ mov rcx, [reg_p1+48]
+ sub r8, [reg_p2]
+ sbb r9, [reg_p2+8]
+ sbb r10, [reg_p2+16]
+ sbb r11, [reg_p2+24]
+ sbb r12, [reg_p2+32]
+ sbb r13, [reg_p2+40]
+ sbb rcx, [reg_p2+48]
+
+ mov rax, [rip+\P0]
+ mov rdi, [rip+\P0+8]
+ mov rsi, [rip+\P0+24]
+ add r8, rax
+ mov rax, [rip+\P0+32]
+ adc r9, rdi
+ adc r10, rdi
+ adc r11, rsi
+ mov rdi, [rip+\P0+40]
+ mov rsi, [rip+\P0+48]
+ adc r12, rax
+ adc r13, rdi
+ adc rcx, rsi
+ mov [reg_p3], r8
+ mov [reg_p3+8], r9
+ mov [reg_p3+16], r10
+ mov [reg_p3+24], r11
+ mov [reg_p3+32], r12
+ mov [reg_p3+40], r13
+ mov [reg_p3+48], rcx
+
+ pop r13
+ pop r12
+.endm
+
+//***********************************************************************
+// Multiprecision subtraction with correction with 2*p434
+// Operation: c [reg_p3] = a [reg_p1] - b [reg_p2] + 2*p434
+//***********************************************************************
+#define mp_sub434_p2_asm S2N_SIKE_P434_R3_NAMESPACE(mp_sub434_p2_asm)
+.global mp_sub434_p2_asm
+mp_sub434_p2_asm:
+ SUB434_PX asm_p434x2
+ ret
+
+//***********************************************************************
+// Multiprecision subtraction with correction with 4*p434
+// Operation: c [reg_p3] = a [reg_p1] - b [reg_p2] + 4*p434
+//***********************************************************************
+#define mp_sub434_p4_asm S2N_SIKE_P434_R3_NAMESPACE(mp_sub434_p4_asm)
+.global mp_sub434_p4_asm
+mp_sub434_p4_asm:
+ SUB434_PX asm_p434x4
+ ret
+
+///////////////////////////////////////////////////////////////// MACRO
+// Schoolbook integer multiplication
+// Inputs: memory pointers M0 and M1
+// Outputs: memory pointer C and regs T1, T3, rax
+// Temps: regs T0:T6
+/////////////////////////////////////////////////////////////////
+#ifdef S2N_ADX
+
+.macro MUL192_SCHOOL M0, M1, C, T0, T1, T2, T3, T4, T5, T6
+ mov rdx, \M0
+ mulx \T0, \T1, \M1 // T0:T1 = A0*B0
+ mov \C, \T1 // C0_final
+ mulx \T1, \T2, 8\M1 // T1:T2 = A0*B1
+ xor rax, rax
+ adox \T0, \T2
+ mulx \T2, \T3, 16\M1 // T2:T3 = A0*B2
+ adox \T1, \T3
+
+ mov rdx, 8\M0
+ mulx \T3, \T4, \M1 // T3:T4 = A1*B0
+ adox \T2, rax
+ xor rax, rax
+ mulx \T5, \T6, 8\M1 // T5:T6 = A1*B1
+ adox \T4, \T0
+ mov 8\C, \T4 // C1_final
+ adcx \T3, \T6
+ mulx \T6, \T0, 16\M1 // T6:T0 = A1*B2
+ adox \T3, \T1
+ adcx \T5, \T0
+ adcx \T6, rax
+ adox \T5, \T2
+
+ mov rdx, 16\M0
+ mulx \T1, \T0, \M1 // T1:T0 = A2*B0
+ adox \T6, rax
+ xor rax, rax
+ mulx \T4, \T2, 8\M1 // T4:T2 = A2*B1
+ adox \T0, \T3
+ mov 16\C, \T0 // C2_final
+ adcx \T1, \T5
+ mulx \T0, \T3, 16\M1 // T0:T3 = A2*B2
+ adcx \T4, \T6
+ adcx \T0, rax
+ adox \T1, \T2
+ adox \T3, \T4
+ adox rax, \T0
+.endm
+
+///////////////////////////////////////////////////////////////// MACRO
+// Schoolbook integer multiplication
+// Inputs: memory pointers M0 and M1
+// Outputs: memory pointer C
+// Temps: regs T0:T9
+/////////////////////////////////////////////////////////////////
+.macro MUL256_SCHOOL M0, M1, C, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
+ mov rdx, \M0
+ mulx \T0, \T1, \M1 // T0:T1 = A0*B0
+ mov \C, \T1 // C0_final
+ mulx \T1, \T2, 8\M1 // T1:T2 = A0*B1
+ xor rax, rax
+ adox \T0, \T2
+ mulx \T2, \T3, 16\M1 // T2:T3 = A0*B2
+ adox \T1, \T3
+ mulx \T3, \T4, 24\M1 // T3:T4 = A0*B3
+ adox \T2, \T4
+
+ mov rdx, 8\M0
+ mulx \T5, \T4, \M1 // T5:T4 = A1*B0
+ adox \T3, rax
+ xor rax, rax
+ mulx \T6, \T7, 8\M1 // T6:T7 = A1*B1
+ adox \T4, \T0
+ mov 8\C, \T4 // C1_final
+ adcx \T5, \T7
+ mulx \T7, \T8, 16\M1 // T7:T8 = A1*B2
+ adcx \T6, \T8
+ adox \T5, \T1
+ mulx \T8, \T9, 24\M1 // T8:T9 = A1*B3
+ adcx \T7, \T9
+ adcx \T8, rax
+ adox \T6, \T2
+
+ mov rdx, 16\M0
+ mulx \T1, \T0, \M1 // T1:T0 = A2*B0
+ adox \T7, \T3
+ adox \T8, rax
+ xor rax, rax
+ mulx \T2, \T3, 8\M1 // T2:T3 = A2*B1
+ adox \T0, \T5
+ mov 16\C, \T0 // C2_final
+ adcx \T1, \T3
+ mulx \T3, \T4, 16\M1 // T3:T4 = A2*B2
+ adcx \T2, \T4
+ adox \T1, \T6
+    mulx    \T4, \T9, 24\M1   // T4:T9 = A2*B3
+ adcx \T3, \T9
+ mov rdx, 24\M0
+ adcx \T4, rax
+
+ adox \T2, \T7
+ adox \T3, \T8
+ adox \T4, rax
+
+ mulx \T5, \T0, \M1 // T5:T0 = A3*B0
+ xor rax, rax
+ mulx \T6, \T7, 8\M1 // T6:T7 = A3*B1
+ adcx \T5, \T7
+ adox \T1, \T0
+ mulx \T7, \T8, 16\M1 // T7:T8 = A3*B2
+ adcx \T6, \T8
+ adox \T2, \T5
+ mulx \T8, \T9, 24\M1 // T8:T9 = A3*B3
+ adcx \T7, \T9
+ adcx \T8, rax
+
+ adox \T3, \T6
+ adox \T4, \T7
+ adox \T8, rax
+ mov 24\C, \T1 // C3_final
+ mov 32\C, \T2 // C4_final
+ mov 40\C, \T3 // C5_final
+ mov 48\C, \T4 // C6_final
+ mov 56\C, \T8 // C7_final
+.endm
+
+#else // S2N_ADX
+
+.macro MUL192_SCHOOL M0, M1, C, T0, T1, T2, T3, T4, T5, T6
+ mov rdx, \M0
+ mulx \T0, \T1, \M1 // T0:T1 = A0*B0
+ mov \C, \T1 // C0_final
+ mulx \T1, \T2, 8\M1 // T1:T2 = A0*B1
+ add \T0, \T2
+ mulx \T2, \T3, 16\M1 // T2:T3 = A0*B2
+ adc \T1, \T3
+
+ mov rdx, 8\M0
+ mulx \T3, \T4, \M1 // T3:T4 = A1*B0
+ adc \T2, 0
+ mulx \T5, \T6, 8\M1 // T5:T6 = A1*B1
+ add \T4, \T0
+ mov 8\C, \T4 // C1_final
+ adc \T3, \T1
+ adc \T5, \T2
+ mulx \T2, \T1, 16\M1 // T2:T1 = A1*B2
+ adc \T2, 0
+
+ add \T3, \T6
+ adc \T5, \T1
+ adc \T2, 0
+
+ mov rdx, 16\M0
+ mulx \T1, \T0, \M1 // T1:T0 = A2*B0
+ add \T0, \T3
+ mov 16\C, \T0 // C2_final
+ mulx \T4, \T6, 8\M1 // T4:T6 = A2*B1
+ adc \T1, \T5
+ adc \T2, \T4
+ mulx rax, \T3, 16\M1 // rax:T3 = A2*B2
+ adc rax, 0
+ add \T1, \T6
+ adc \T3, \T2
+ adc rax, 0
+.endm
+
+.macro MUL256_SCHOOL M0, M1, C, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
+ mov rdx, \M0
+ mulx \T0, \T1, \M1 // T0:T1 = A0*B0
+ mov \C, \T1 // C0_final
+ mulx \T1, \T2, 8\M1 // T1:T2 = A0*B1
+ add \T0, \T2
+ mulx \T2, \T3, 16\M1 // T2:T3 = A0*B2
+ adc \T1, \T3
+ mulx \T3, \T4, 24\M1 // T3:T4 = A0*B3
+ adc \T2, \T4
+ mov rdx, 8\M0
+ adc \T3, 0
+
+ mulx \T5, \T4, \M1 // T5:T4 = A1*B0
+ mulx \T6, \T7, 8\M1 // T6:T7 = A1*B1
+ add \T5, \T7
+ mulx \T7, \T8, 16\M1 // T7:T8 = A1*B2
+ adc \T6, \T8
+ mulx \T8, \T9, 24\M1 // T8:T9 = A1*B3
+ adc \T7, \T9
+ adc \T8, 0
+
+ add \T4, \T0
+ mov 8\C, \T4 // C1_final
+ adc \T5, \T1
+ adc \T6, \T2
+ adc \T7, \T3
+ mov rdx, 16\M0
+ adc \T8, 0
+
+ mulx \T1, \T0, \M1 // T1:T0 = A2*B0
+ mulx \T2, \T3, 8\M1 // T2:T3 = A2*B1
+ add \T1, \T3
+ mulx \T3, \T4, 16\M1 // T3:T4 = A2*B2
+ adc \T2, \T4
+    mulx    \T4, \T9, 24\M1   // T4:T9 = A2*B3
+ adc \T3, \T9
+ mov rdx, 24\M0
+ adc \T4, 0
+
+ add \T0, \T5
+ mov 16\C, \T0 // C2_final
+ adc \T1, \T6
+ adc \T2, \T7
+ adc \T3, \T8
+ adc \T4, 0
+
+ mulx \T5, \T0, \M1 // T5:T0 = A3*B0
+ mulx \T6, \T7, 8\M1 // T6:T7 = A3*B1
+ add \T5, \T7
+ mulx \T7, \T8, 16\M1 // T7:T8 = A3*B2
+ adc \T6, \T8
+ mulx \T8, \T9, 24\M1 // T8:T9 = A3*B3
+ adc \T7, \T9
+ adc \T8, 0
+
+ add \T1, \T0
+ mov 24\C, \T1 // C3_final
+ adc \T2, \T5
+ mov 32\C, \T2 // C4_final
+ adc \T3, \T6
+ mov 40\C, \T3 // C5_final
+ adc \T4, \T7
+ mov 48\C, \T4 // C6_final
+ adc \T8, 0
+ mov 56\C, \T8 // C7_final
+.endm
+
+#endif // S2N_ADX
+
+//*****************************************************************************
+// 434-bit multiplication using Karatsuba (one level), schoolbook (one level)
+//*****************************************************************************
+#define mul434_asm S2N_SIKE_P434_R3_NAMESPACE(mul434_asm)
+.global mul434_asm
+mul434_asm:
+ push r12
+ push r13
+ push r14
+ push r15
+ mov rcx, reg_p3
+
+ // r8-r11 <- AH + AL, rax <- mask
+ xor rax, rax
+ mov r8, [reg_p1]
+ mov r9, [reg_p1+8]
+ mov r10, [reg_p1+16]
+ mov r11, [reg_p1+24]
+ push rbx
+ push rbp
+ sub rsp, 96
+ add r8, [reg_p1+32]
+ adc r9, [reg_p1+40]
+ adc r10, [reg_p1+48]
+ adc r11, 0
+ sbb rax, 0
+ mov [rsp], r8
+ mov [rsp+8], r9
+ mov [rsp+16], r10
+ mov [rsp+24], r11
+
+ // r12-r15 <- BH + BL, rbx <- mask
+ xor rbx, rbx
+ mov r12, [reg_p2]
+ mov r13, [reg_p2+8]
+ mov r14, [reg_p2+16]
+ mov r15, [reg_p2+24]
+ add r12, [reg_p2+32]
+ adc r13, [reg_p2+40]
+ adc r14, [reg_p2+48]
+ adc r15, 0
+ sbb rbx, 0
+ mov [rsp+32], r12
+ mov [rsp+40], r13
+ mov [rsp+48], r14
+ mov [rsp+56], r15
+
+ // r12-r15 <- masked (BH + BL)
+ and r12, rax
+ and r13, rax
+ and r14, rax
+ and r15, rax
+
+ // r8-r11 <- masked (AH + AL)
+ and r8, rbx
+ and r9, rbx
+ and r10, rbx
+ and r11, rbx
+
+ // r8-r11 <- masked (AH + AL) + masked (AH + AL)
+ add r8, r12
+ adc r9, r13
+ adc r10, r14
+ adc r11, r15
+ mov [rsp+64], r8
+ mov [rsp+72], r9
+ mov [rsp+80], r10
+ mov [rsp+88], r11
+
+ // [rsp] <- (AH+AL) x (BH+BL), low part
+ MUL256_SCHOOL [rsp], [rsp+32], [rsp], r8, r9, r10, r11, r12, r13, r14, r15, rbx, rbp
+
+ // [rcx] <- AL x BL
+ MUL256_SCHOOL [reg_p1], [reg_p2], [rcx], r8, r9, r10, r11, r12, r13, r14, r15, rbx, rbp // Result C0-C3
+
+ // [rcx+64], rbx, rbp, rax <- AH x BH
+ MUL192_SCHOOL [reg_p1+32], [reg_p2+32], [rcx+64], r8, rbx, r10, rbp, r12, r13, r14
+
+ // r8-r11 <- (AH+AL) x (BH+BL), final step
+ mov r8, [rsp+64]
+ mov r9, [rsp+72]
+ mov r10, [rsp+80]
+ mov r11, [rsp+88]
+ mov rdx, [rsp+32]
+ add r8, rdx
+ mov rdx, [rsp+40]
+ adc r9, rdx
+ mov rdx, [rsp+48]
+ adc r10, rdx
+ mov rdx, [rsp+56]
+ adc r11, rdx
+
+ // r8-r15 <- (AH+AL) x (BH+BL) - ALxBL
+ mov r12, [rsp]
+ mov r13, [rsp+8]
+ mov r14, [rsp+16]
+ mov r15, [rsp+24]
+ sub r12, [rcx]
+ sbb r13, [rcx+8]
+ sbb r14, [rcx+16]
+ sbb r15, [rcx+24]
+ sbb r8, [rcx+32]
+ sbb r9, [rcx+40]
+ sbb r10, [rcx+48]
+ sbb r11, [rcx+56]
+
+ // r8-r15 <- (AH+AL) x (BH+BL) - ALxBL - AHxBH
+ sub r12, [rcx+64]
+ sbb r13, [rcx+72]
+ sbb r14, [rcx+80]
+ sbb r15, rbx
+ sbb r8, rbp
+ sbb r9, rax
+ sbb r10, 0
+ sbb r11, 0
+
+ add r12, [rcx+32]
+ mov [rcx+32], r12 // Result C4-C7
+ adc r13, [rcx+40]
+ mov [rcx+40], r13
+ adc r14, [rcx+48]
+ mov [rcx+48], r14
+ adc r15, [rcx+56]
+ mov [rcx+56], r15
+ adc r8, [rcx+64]
+ mov [rcx+64], r8 // Result C8-C15
+ adc r9, [rcx+72]
+ mov [rcx+72], r9
+ adc r10, [rcx+80]
+ mov [rcx+80], r10
+ adc r11, rbx
+ mov [rcx+88], r11
+ adc rbp, 0
+ mov [rcx+96], rbp
+ adc rax, 0
+ mov [rcx+104], rax
+
+ add rsp, 96
+ pop rbp
+ pop rbx
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ ret
+
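
Editor's note: mul434_asm above uses one level of Karatsuba. The 434-bit operands are split at
the 256-bit boundary into AL/AH and BL/BH, the three products AL*BL, AH*BH and (AH+AL)*(BH+BL)
are formed with the schoolbook macros, and the middle term is recovered as
(AH+AL)*(BH+BL) - AL*BL - AH*BH. A stand-alone C sketch of the same identity on a size where no
intermediate can overflow (illustrative only, not part of the patch):

    #include <stdint.h>

    /* One-level Karatsuba 32x32 -> 64-bit multiply: three 16x16 products instead of four. */
    static uint64_t mul32_karatsuba(uint32_t a, uint32_t b)
    {
        uint64_t a0 = a & 0xFFFF, a1 = a >> 16;             /* AL, AH */
        uint64_t b0 = b & 0xFFFF, b1 = b >> 16;             /* BL, BH */

        uint64_t low  = a0 * b0;                            /* AL*BL */
        uint64_t high = a1 * b1;                            /* AH*BH */
        uint64_t mid  = (a0 + a1) * (b0 + b1) - low - high; /* = AL*BH + AH*BL */

        return low + (mid << 16) + (high << 32);
    }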
+///////////////////////////////////////////////////////////////// MACRO
+// Schoolbook integer multiplication
+// Inputs: reg I0 and memory pointer M1
+// Outputs: regs T0:T4
+// Temps: regs T0:T5
+/////////////////////////////////////////////////////////////////
+.macro MUL64x256_SCHOOL I0, M1, T0, T1, T2, T3, T4, T5
+ mulx \T2, \T4, 8\M1
+ xor rax, rax
+ mulx \T3, \T5, 16\M1
+ ADD1 \T1, \T4 // T1 <- C1_final
+ ADC1 \T2, \T5 // T2 <- C2_final
+ mulx \T4, \T5, 24\M1
+ ADC1 \T3, \T5 // T3 <- C3_final
+ ADC1 \T4, rax // T4 <- C4_final
+.endm
+
+///////////////////////////////////////////////////////////////// MACRO
+// Schoolbook integer multiplication
+// Inputs: regs I0 and I1, and memory pointer M1
+// Outputs: regs T0:T5
+// Temps: regs T0:T5
+/////////////////////////////////////////////////////////////////
+#ifdef S2N_ADX
+
+.macro MUL128x256_SCHOOL I0, I1, M1, T0, T1, T2, T3, T4, T5
+ mulx \T2, \T4, 8\M1
+ xor rax, rax
+ mulx \T3, \T5, 16\M1
+ ADD1 \T1, \T4
+ ADC1 \T2, \T5
+ mulx \T4, \T5, 24\M1
+ ADC1 \T3, \T5
+ ADC1 \T4, rax
+
+ xor rax, rax
+ mov rdx, \I1
+ mulx \I1, \T5, \M1
+ ADD2 \T1, \T5 // T1 <- C1_final
+ ADC2 \T2, \I1
+ mulx \T5, \I1, 8\M1
+ ADC2 \T3, \T5
+ ADD1 \T2, \I1
+ mulx \T5, \I1, 16\M1
+ ADC2 \T4, \T5
+ ADC1 \T3, \I1
+ mulx \T5, \I1, 24\M1
+ ADC2 \T5, rax
+ ADC1 \T4, \I1
+ ADC1 \T5, rax
+.endm
+
+#else // S2N_ADX
+
+.macro MUL128x256_SCHOOL I0, I1, M1, T0, T1, T2, T3, T4, T5
+ mulx \T2, \T4, 8\M1
+ mulx \T3, \T5, 16\M1
+ add \T1, \T4
+ adc \T2, \T5
+ mulx \T4, \T5, 24\M1
+ adc \T3, \T5
+ adc \T4, 0
+
+ mov rdx, \I1
+ mulx \I1, \T5, \M1
+ add \T1, \T5 // T1 <- C1_final
+ adc \T2, \I1
+ mulx \T5, \I1, 8\M1
+ adc \T3, \T5
+ mulx \T5, rax, 16\M1
+ adc \T4, \T5
+ mulx \T5, rdx, 24\M1
+ adc \T5, 0
+ add \T2, \I1
+ adc \T3, rax
+ adc \T4, rdx
+ adc \T5, 0
+.endm
+
+#endif // S2N_ADX
+
+//**************************************************************************************
+// Montgomery reduction
+// Based on method described in Faz-Hernandez et al. https://eprint.iacr.org/2017/1015
+// Operation: c [reg_p2] = a [reg_p1] * R^-1 mod p434, where R = 2^448 (result in [0, 2*p434-1])
+//**************************************************************************************
+#define rdc434_asm S2N_SIKE_P434_R3_NAMESPACE(rdc434_asm)
+.global rdc434_asm
+rdc434_asm:
+ push r14
+
+ // a[0-1] x p434p1_nz --> result: r8:r13
+ mov rdx, [reg_p1]
+ mov r14, [reg_p1+8]
+ mulx r9, r8, [rip+asm_p434p1+24] // result r8
+ push r12
+ push r13
+ push r15
+ push rbp
+ push rbx
+ MUL128x256_SCHOOL rdx, r14, [rip+asm_p434p1+24], r8, r9, r10, r11, r12, r13
+
+ mov rdx, [reg_p1+16]
+ mov rcx, [reg_p1+72]
+ add r8, [reg_p1+24]
+ adc r9, [reg_p1+32]
+ adc r10, [reg_p1+40]
+ adc r11, [reg_p1+48]
+ adc r12, [reg_p1+56]
+ adc r13, [reg_p1+64]
+ adc rcx, 0
+ mulx rbp, rbx, [rip+asm_p434p1+24] // result rbx
+ mov [reg_p2], r9
+ mov [reg_p2+8], r10
+ mov [reg_p2+16], r11
+ mov [reg_p2+24], r12
+ mov [reg_p2+32], r13
+ mov r9, [reg_p1+80]
+ mov r10, [reg_p1+88]
+ mov r11, [reg_p1+96]
+ mov rdi, [reg_p1+104]
+ adc r9, 0
+ adc r10, 0
+ adc r11, 0
+ adc rdi, 0
+
+ // a[2-3] x p434p1_nz --> result: rbx, rbp, r12:r15
+ MUL128x256_SCHOOL rdx, r8, [rip+asm_p434p1+24], rbx, rbp, r12, r13, r14, r15
+
+ mov rdx, [reg_p2]
+ add rbx, [reg_p2+8]
+ adc rbp, [reg_p2+16]
+ adc r12, [reg_p2+24]
+ adc r13, [reg_p2+32]
+ adc r14, rcx
+ mov rcx, 0
+ adc r15, r9
+ adc rcx, r10
+ mulx r9, r8, [rip+asm_p434p1+24] // result r8
+ mov [reg_p2], rbp
+ mov [reg_p2+8], r12
+ mov [reg_p2+16], r13
+ adc r11, 0
+ adc rdi, 0
+
+ // a[4-5] x p434p1_nz --> result: r8:r13
+ MUL128x256_SCHOOL rdx, rbx, [rip+asm_p434p1+24], r8, r9, r10, rbp, r12, r13
+
+ mov rdx, [reg_p2]
+ add r8, [reg_p2+8]
+ adc r9, [reg_p2+16]
+ adc r10, r14
+ adc rbp, r15
+ adc r12, rcx
+ adc r13, r11
+ adc rdi, 0
+ mulx r15, r14, [rip+asm_p434p1+24] // result r14
+ mov [reg_p2], r8 // Final result c0-c1
+ mov [reg_p2+8], r9
+
+ // a[6-7] x p434p1_nz --> result: r14:r15, r8:r9, r11
+ MUL64x256_SCHOOL rdx, [rip+asm_p434p1+24], r14, r15, r8, r9, r11, rcx
+
+ // Final result c2:c6
+ add r14, r10
+ adc r15, rbp
+ pop rbx
+ pop rbp
+ adc r8, r12
+ adc r9, r13
+ adc r11, rdi
+ mov [reg_p2+16], r14
+ mov [reg_p2+24], r15
+ pop r15
+ pop r13
+ mov [reg_p2+32], r8
+ mov [reg_p2+40], r9
+ mov [reg_p2+48], r11
+
+ pop r12
+ pop r14
+ ret
+
+//***********************************************************************
+// 434-bit multiprecision addition
+// Operation: c [reg_p3] = a [reg_p1] + b [reg_p2]
+//***********************************************************************
+#define mp_add434_asm S2N_SIKE_P434_R3_NAMESPACE(mp_add434_asm)
+.global mp_add434_asm
+mp_add434_asm:
+ mov r8, [reg_p1]
+ mov r9, [reg_p1+8]
+ mov r10, [reg_p1+16]
+ mov r11, [reg_p1+24]
+ add r8, [reg_p2]
+ adc r9, [reg_p2+8]
+ adc r10, [reg_p2+16]
+ adc r11, [reg_p2+24]
+ mov [reg_p3], r8
+ mov [reg_p3+8], r9
+ mov [reg_p3+16], r10
+ mov [reg_p3+24], r11
+
+ mov r8, [reg_p1+32]
+ mov r9, [reg_p1+40]
+ mov r10, [reg_p1+48]
+ adc r8, [reg_p2+32]
+ adc r9, [reg_p2+40]
+ adc r10, [reg_p2+48]
+ mov [reg_p3+32], r8
+ mov [reg_p3+40], r9
+ mov [reg_p3+48], r10
+ ret
+
+//***************************************************************************
+// 2x434-bit multiprecision subtraction/addition
+// Operation: c [reg_p3] = a [reg_p1] - b [reg_p2]. If c < 0, add p434*2^448
+//***************************************************************************
+#define mp_subadd434x2_asm S2N_SIKE_P434_R3_NAMESPACE(mp_subadd434x2_asm)
+.global mp_subadd434x2_asm
+mp_subadd434x2_asm:
+ push r12
+ push r13
+ push r14
+ push r15
+ xor rax, rax
+ mov r8, [reg_p1]
+ mov r9, [reg_p1+8]
+ mov r10, [reg_p1+16]
+ mov r11, [reg_p1+24]
+ mov r12, [reg_p1+32]
+ sub r8, [reg_p2]
+ sbb r9, [reg_p2+8]
+ sbb r10, [reg_p2+16]
+ sbb r11, [reg_p2+24]
+ sbb r12, [reg_p2+32]
+ mov [reg_p3], r8
+ mov [reg_p3+8], r9
+ mov [reg_p3+16], r10
+ mov [reg_p3+24], r11
+ mov [reg_p3+32], r12
+
+ mov r8, [reg_p1+40]
+ mov r9, [reg_p1+48]
+ mov r10, [reg_p1+56]
+ mov r11, [reg_p1+64]
+ mov r12, [reg_p1+72]
+ sbb r8, [reg_p2+40]
+ sbb r9, [reg_p2+48]
+ sbb r10, [reg_p2+56]
+ sbb r11, [reg_p2+64]
+ sbb r12, [reg_p2+72]
+ mov [reg_p3+40], r8
+ mov [reg_p3+48], r9
+ mov [reg_p3+56], r10
+
+ mov r13, [reg_p1+80]
+ mov r14, [reg_p1+88]
+ mov r15, [reg_p1+96]
+ mov rcx, [reg_p1+104]
+ sbb r13, [reg_p2+80]
+ sbb r14, [reg_p2+88]
+ sbb r15, [reg_p2+96]
+ sbb rcx, [reg_p2+104]
+ sbb rax, 0
+
+ // Add p434 anded with the mask in rax
+ mov r8, [rip+asm_p434]
+ mov r9, [rip+asm_p434+24]
+ mov r10, [rip+asm_p434+32]
+ mov rdi, [rip+asm_p434+40]
+ mov rsi, [rip+asm_p434+48]
+ and r8, rax
+ and r9, rax
+ and r10, rax
+ and rdi, rax
+ and rsi, rax
+ mov rax, [reg_p3+56]
+ add rax, r8
+ adc r11, r8
+ adc r12, r8
+ adc r13, r9
+ adc r14, r10
+ adc r15, rdi
+ adc rcx, rsi
+
+ mov [reg_p3+56], rax
+ mov [reg_p3+64], r11
+ mov [reg_p3+72], r12
+ mov [reg_p3+80], r13
+ mov [reg_p3+88], r14
+ mov [reg_p3+96], r15
+ mov [reg_p3+104], rcx
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ ret
+
+//***********************************************************************
+// Double 2x434-bit multiprecision subtraction
+// Operation: c [reg_p3] = c [reg_p3] - a [reg_p1] - b [reg_p2]
+//***********************************************************************
+#define mp_dblsub434x2_asm S2N_SIKE_P434_R3_NAMESPACE(mp_dblsub434x2_asm)
+.global mp_dblsub434x2_asm
+mp_dblsub434x2_asm:
+ push r12
+ push r13
+ push r14
+
+ mov r8, [reg_p3]
+ mov r9, [reg_p3+8]
+ mov r10, [reg_p3+16]
+ mov r11, [reg_p3+24]
+ mov r12, [reg_p3+32]
+ mov r13, [reg_p3+40]
+ mov r14, [reg_p3+48]
+ sub r8, [reg_p1]
+ sbb r9, [reg_p1+8]
+ sbb r10, [reg_p1+16]
+ sbb r11, [reg_p1+24]
+ sbb r12, [reg_p1+32]
+ sbb r13, [reg_p1+40]
+ sbb r14, [reg_p1+48]
+ setc al
+ sub r8, [reg_p2]
+ sbb r9, [reg_p2+8]
+ sbb r10, [reg_p2+16]
+ sbb r11, [reg_p2+24]
+ sbb r12, [reg_p2+32]
+ sbb r13, [reg_p2+40]
+ sbb r14, [reg_p2+48]
+ setc cl
+ mov [reg_p3], r8
+ mov [reg_p3+8], r9
+ mov [reg_p3+16], r10
+ mov [reg_p3+24], r11
+ mov [reg_p3+32], r12
+ mov [reg_p3+40], r13
+ mov [reg_p3+48], r14
+
+ mov r8, [reg_p3+56]
+ mov r9, [reg_p3+64]
+ mov r10, [reg_p3+72]
+ mov r11, [reg_p3+80]
+ mov r12, [reg_p3+88]
+ mov r13, [reg_p3+96]
+ mov r14, [reg_p3+104]
+ bt rax, 0
+ sbb r8, [reg_p1+56]
+ sbb r9, [reg_p1+64]
+ sbb r10, [reg_p1+72]
+ sbb r11, [reg_p1+80]
+ sbb r12, [reg_p1+88]
+ sbb r13, [reg_p1+96]
+ sbb r14, [reg_p1+104]
+ bt rcx, 0
+ sbb r8, [reg_p2+56]
+ sbb r9, [reg_p2+64]
+ sbb r10, [reg_p2+72]
+ sbb r11, [reg_p2+80]
+ sbb r12, [reg_p2+88]
+ sbb r13, [reg_p2+96]
+ sbb r14, [reg_p2+104]
+ mov [reg_p3+56], r8
+ mov [reg_p3+64], r9
+ mov [reg_p3+72], r10
+ mov [reg_p3+80], r11
+ mov [reg_p3+88], r12
+ mov [reg_p3+96], r13
+ mov [reg_p3+104], r14
+
+ pop r14
+ pop r13
+ pop r12
+ ret
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.h
new file mode 100644
index 0000000000..1753e25fb4
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fp_x64_asm.h
@@ -0,0 +1,38 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: x86_64 assembly optimized modular arithmetic for P434
+*********************************************************************************************/
+
+#pragma once
+
+#if defined(S2N_SIKE_P434_R3_ASM)
+
+#define fpadd434_asm S2N_SIKE_P434_R3_NAMESPACE(fpadd434_asm)
+void fpadd434_asm(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define fpsub434_asm S2N_SIKE_P434_R3_NAMESPACE(fpsub434_asm)
+void fpsub434_asm(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define mul434_asm S2N_SIKE_P434_R3_NAMESPACE(mul434_asm)
+void mul434_asm(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define rdc434_asm S2N_SIKE_P434_R3_NAMESPACE(rdc434_asm)
+void rdc434_asm(digit_t* ma, digit_t* mc);
+
+#define mp_add434_asm S2N_SIKE_P434_R3_NAMESPACE(mp_add434_asm)
+void mp_add434_asm(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define mp_subadd434x2_asm S2N_SIKE_P434_R3_NAMESPACE(mp_subadd434x2_asm)
+void mp_subadd434x2_asm(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define mp_dblsub434x2_asm S2N_SIKE_P434_R3_NAMESPACE(mp_dblsub434x2_asm)
+void mp_dblsub434x2_asm(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define mp_sub434_p2_asm S2N_SIKE_P434_R3_NAMESPACE(mp_sub434_p2_asm)
+void mp_sub434_p2_asm(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define mp_sub434_p4_asm S2N_SIKE_P434_R3_NAMESPACE(mp_sub434_p4_asm)
+void mp_sub434_p4_asm(const digit_t* a, const digit_t* b, digit_t* c);
+
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.c
new file mode 100644
index 0000000000..40c61144e4
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.c
@@ -0,0 +1,478 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: core functions over GF(p) and GF(p^2)
+*********************************************************************************************/
+
+#include <string.h>
+#include "sikep434r3.h"
+#include "sikep434r3_fp.h"
+#include "sikep434r3_fpx.h"
+#include "pq-crypto/s2n_pq.h"
+#include "sikep434r3_fp_x64_asm.h"
+
+static void fpmul_mont(const felm_t ma, const felm_t mb, felm_t mc);
+static void to_mont(const felm_t a, felm_t mc);
+static void from_mont(const felm_t ma, felm_t c);
+static void fpsqr_mont(const felm_t ma, felm_t mc);
+static unsigned int mp_sub(const digit_t* a, const digit_t* b, digit_t* c, const unsigned int nwords);
+static void fpinv_chain_mont(felm_t a);
+static void fpinv_mont(felm_t a);
+static void to_fp2mont(const f2elm_t *a, f2elm_t *mc);
+static void from_fp2mont(const f2elm_t *ma, f2elm_t *c);
+
+/* Encoding digits to bytes according to endianness */
+__inline static void encode_to_bytes(const digit_t* x, unsigned char* enc, int nbytes)
+{
+ if (is_big_endian()) {
+ int ndigits = nbytes / sizeof(digit_t);
+ int rem = nbytes % sizeof(digit_t);
+
+ for (int i = 0; i < ndigits; i++) {
+ digit_t temp = S2N_SIKE_P434_R3_BSWAP_DIGIT(x[i]);
+ memcpy(enc + (i * sizeof(digit_t)), (unsigned char *)&temp, sizeof(digit_t));
+ }
+
+ if (rem) {
+ digit_t ld = S2N_SIKE_P434_R3_BSWAP_DIGIT(x[ndigits]);
+ memcpy(enc + ndigits * sizeof(digit_t), (unsigned char *) &ld, rem);
+ }
+ } else {
+ memcpy(enc, (const unsigned char *) x, nbytes);
+ }
+}
+
+/* Conversion of GF(p^2) element from Montgomery to standard representation,
+ * and encoding by removing leading 0 bytes */
+void fp2_encode(const f2elm_t *x, unsigned char *enc)
+{
+ f2elm_t t;
+
+ from_fp2mont(x, &t);
+ encode_to_bytes(t.e[0], enc, S2N_SIKE_P434_R3_FP2_ENCODED_BYTES / 2);
+ encode_to_bytes(t.e[1], enc + S2N_SIKE_P434_R3_FP2_ENCODED_BYTES / 2, S2N_SIKE_P434_R3_FP2_ENCODED_BYTES / 2);
+}
+
+/* Parse byte sequence back into GF(p^2) element, and conversion to Montgomery representation */
+void fp2_decode(const unsigned char *x, f2elm_t *dec)
+{
+ decode_to_digits(x, dec->e[0], S2N_SIKE_P434_R3_FP2_ENCODED_BYTES / 2, S2N_SIKE_P434_R3_NWORDS_FIELD);
+ decode_to_digits(x + S2N_SIKE_P434_R3_FP2_ENCODED_BYTES / 2, dec->e[1], S2N_SIKE_P434_R3_FP2_ENCODED_BYTES / 2, S2N_SIKE_P434_R3_NWORDS_FIELD);
+ to_fp2mont(dec, dec);
+}
+
+/* Multiprecision multiplication, c = a*b mod p. */
+static void fpmul_mont(const felm_t ma, const felm_t mb, felm_t mc)
+{
+ dfelm_t temp = {0};
+
+ mp_mul(ma, mb, temp, S2N_SIKE_P434_R3_NWORDS_FIELD);
+ rdc_mont(temp, mc);
+}
+
+/* Conversion to Montgomery representation,
+ * mc = a*R^2*R^(-1) mod p = a*R mod p, where a in [0, p-1].
+ * The Montgomery constant R^2 mod p is the global value "Montgomery_R2". */
+static void to_mont(const felm_t a, felm_t mc)
+{
+ fpmul_mont(a, (const digit_t*)&Montgomery_R2, mc);
+}
+
+/* Conversion from Montgomery representation to standard representation,
+ * c = ma*R^(-1) mod p = a mod p, where ma in [0, p-1]. */
+static void from_mont(const felm_t ma, felm_t c)
+{
+ digit_t one[S2N_SIKE_P434_R3_NWORDS_FIELD] = {0};
+
+ one[0] = 1;
+ fpmul_mont(ma, one, c);
+ fpcorrection434(c);
+}
+
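Both conversions are thin wrappers around the same primitive: fpmul_mont computes the Montgomery product MontMul(x, y) = x*y*R^(-1) mod p, with R = 2^448 here (7 64-bit digits). The identities behind the two helpers are:

    to_mont:    MontMul(a, R^2)  = a*R^2*R^(-1) = a*R mod p   (Montgomery form of a)
    from_mont:  MontMul(a*R, 1)  = a*R*R^(-1)   = a   mod p   (back to standard form)

and for elements already in Montgomery form, MontMul(a*R, b*R) = (a*b)*R mod p, so the representation is closed under multiplication. That is why all intermediate field arithmetic in this file stays in Montgomery form and conversions only happen at the encode/decode boundaries.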
+/* Copy wordsize digits, c = a, where lng(a) = nwords. */
+void copy_words(const digit_t* a, digit_t* c, const unsigned int nwords)
+{
+ unsigned int i;
+
+ for (i = 0; i < nwords; i++) {
+ c[i] = a[i];
+ }
+}
+
+/* Multiprecision squaring, c = a^2 mod p. */
+static void fpsqr_mont(const felm_t ma, felm_t mc)
+{
+ dfelm_t temp = {0};
+
+ mp_mul(ma, ma, temp, S2N_SIKE_P434_R3_NWORDS_FIELD);
+ rdc_mont(temp, mc);
+}
+
+/* Copy a GF(p^2) element, c = a. */
+void fp2copy(const f2elm_t *a, f2elm_t *c)
+{
+ fpcopy(a->e[0], c->e[0]);
+ fpcopy(a->e[1], c->e[1]);
+}
+
+/* GF(p^2) division by two, c = a/2 in GF(p^2). */
+void fp2div2(const f2elm_t *a, f2elm_t *c)
+{
+ fpdiv2_434(a->e[0], c->e[0]);
+ fpdiv2_434(a->e[1], c->e[1]);
+}
+
+/* Multiprecision addition, c = a+b, where lng(a) = lng(b) = nwords. Returns the carry bit. */
+unsigned int mp_add(const digit_t* a, const digit_t* b, digit_t* c, const unsigned int nwords)
+{
+ unsigned int i, carry = 0;
+
+ for (i = 0; i < nwords; i++) {
+ S2N_SIKE_P434_R3_ADDC(carry, a[i], b[i], carry, c[i]);
+ }
+
+ return carry;
+}
+
+/* GF(p^2) squaring using Montgomery arithmetic, c = a^2 in GF(p^2).
+ * Inputs: a = a0+a1*i, where a0, a1 are in [0, 2*p-1]
+ * Output: c = c0+c1*i, where c0, c1 are in [0, 2*p-1] */
+void fp2sqr_mont(const f2elm_t *a, f2elm_t *c)
+{
+ felm_t t1, t2, t3;
+
+ mp_addfast(a->e[0], a->e[1], t1); /* t1 = a0+a1 */
+ mp_sub434_p4(a->e[0], a->e[1], t2); /* t2 = a0-a1 */
+ mp_addfast(a->e[0], a->e[0], t3); /* t3 = 2a0 */
+ fpmul_mont(t1, t2, c->e[0]); /* c0 = (a0+a1)(a0-a1) */
+ fpmul_mont(t3, a->e[1], c->e[1]); /* c1 = 2a0*a1 */
+}
+
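The three lines of field arithmetic above come from expanding the square of a = a0 + a1*i in GF(p^2), where i^2 = -1:

    (a0 + a1*i)^2 = (a0^2 - a1^2) + (2*a0*a1)*i
                  = (a0 + a1)*(a0 - a1) + ((2*a0)*a1)*i

so the square costs two field multiplications plus a few additions, rather than the two squarings and one multiplication of the naive expansion.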
+/* Multiprecision subtraction, c = a-b, where lng(a) = lng(b) = nwords. Returns the borrow bit. */
+static unsigned int mp_sub(const digit_t* a, const digit_t* b, digit_t* c, const unsigned int nwords)
+{
+ unsigned int i, borrow = 0;
+
+ for (i = 0; i < nwords; i++) {
+ S2N_SIKE_P434_R3_SUBC(borrow, a[i], b[i], borrow, c[i]);
+ }
+
+ return borrow;
+}
+
+/* Multiprecision subtraction followed by addition with p*2^S2N_SIKE_P434_R3_MAXBITS_FIELD,
+ * c = a-b+(p*2^S2N_SIKE_P434_R3_MAXBITS_FIELD) if a-b < 0, otherwise c=a-b. */
+__inline static void mp_subaddfast(const digit_t* a, const digit_t* b, digit_t* c)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ mp_subadd434x2_asm(a, b, c);
+ return;
+ }
+#endif
+
+ felm_t t1;
+
+ digit_t mask = 0 - (digit_t)mp_sub(a, b, c, 2*S2N_SIKE_P434_R3_NWORDS_FIELD);
+ for (int i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ t1[i] = ((const digit_t *) p434)[i] & mask;
+ }
+ mp_addfast((digit_t*)&c[S2N_SIKE_P434_R3_NWORDS_FIELD], t1, (digit_t*)&c[S2N_SIKE_P434_R3_NWORDS_FIELD]);
+}
+
+/* Multiprecision subtraction, c = c-a-b, where lng(a) = lng(b) = 2*S2N_SIKE_P434_R3_NWORDS_FIELD. */
+__inline static void mp_dblsubfast(const digit_t* a, const digit_t* b, digit_t* c)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ mp_dblsub434x2_asm(a, b, c);
+ return;
+ }
+#endif
+
+ mp_sub(c, a, c, 2*S2N_SIKE_P434_R3_NWORDS_FIELD);
+ mp_sub(c, b, c, 2*S2N_SIKE_P434_R3_NWORDS_FIELD);
+}
+
+/* GF(p^2) multiplication using Montgomery arithmetic, c = a*b in GF(p^2).
+ * Inputs: a = a0+a1*i and b = b0+b1*i, where a0, a1, b0, b1 are in [0, 2*p-1]
+ * Output: c = c0+c1*i, where c0, c1 are in [0, 2*p-1] */
+void fp2mul_mont(const f2elm_t *a, const f2elm_t *b, f2elm_t *c)
+{
+ felm_t t1, t2;
+ dfelm_t tt1, tt2, tt3;
+
+ mp_addfast(a->e[0], a->e[1], t1); /* t1 = a0+a1 */
+ mp_addfast(b->e[0], b->e[1], t2); /* t2 = b0+b1 */
+ mp_mul(a->e[0], b->e[0], tt1, S2N_SIKE_P434_R3_NWORDS_FIELD); /* tt1 = a0*b0 */
+ mp_mul(a->e[1], b->e[1], tt2, S2N_SIKE_P434_R3_NWORDS_FIELD); /* tt2 = a1*b1 */
+ mp_mul(t1, t2, tt3, S2N_SIKE_P434_R3_NWORDS_FIELD); /* tt3 = (a0+a1)*(b0+b1) */
+ mp_dblsubfast(tt1, tt2, tt3); /* tt3 = (a0+a1)*(b0+b1) - a0*b0 - a1*b1 */
+ mp_subaddfast(tt1, tt2, tt1); /* tt1 = a0*b0 - a1*b1 + p*2^S2N_SIKE_P434_R3_MAXBITS_FIELD if a0*b0 - a1*b1 < 0, else tt1 = a0*b0 - a1*b1 */
+ rdc_mont(tt3, c->e[1]); /* c[1] = (a0+a1)*(b0+b1) - a0*b0 - a1*b1 */
+ rdc_mont(tt1, c->e[0]); /* c[0] = a0*b0 - a1*b1 */
+}
+
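This is the standard Karatsuba-style trick for multiplication in GF(p^2) = GF(p)[i]/(i^2 + 1):

    (a0 + a1*i)*(b0 + b1*i) = (a0*b0 - a1*b1) + (a0*b1 + a1*b0)*i

where the cross term is recovered as (a0+a1)*(b0+b1) - a0*b0 - a1*b1, so only three big-field multiplications are needed instead of four. A tiny self-contained check of the identity over a toy prime (purely illustrative, unrelated to this library's types):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const int64_t p = 251;                 /* toy prime standing in for p434 */
    const int64_t a0 = 101, a1 = 87, b0 = 233, b1 = 9;

    int64_t t1 = (a0 * b0) % p;            /* a0*b0 */
    int64_t t2 = (a1 * b1) % p;            /* a1*b1 */
    int64_t c1 = (((a0 + a1) * (b0 + b1)) % p - t1 - t2 + 2 * p) % p;
    int64_t c0 = (t1 - t2 + p) % p;

    assert(c1 == (a0 * b1 + a1 * b0) % p);           /* cross term from one extra multiplication */
    assert(c0 == ((a0 * b0 - a1 * b1) % p + p) % p); /* real part */
    return 0;
}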
+/* Chain to compute a^(p-3)/4 using Montgomery arithmetic. */
+static void fpinv_chain_mont(felm_t a)
+{
+ unsigned int i, j;
+ felm_t t[31], tt;
+
+ /* Precomputed table */
+ fpsqr_mont(a, tt);
+ fpmul_mont(a, tt, t[0]);
+ for (i = 0; i <= 29; i++) {
+ fpmul_mont(t[i], tt, t[i + 1]);
+ }
+
+ fpcopy(a, tt);
+ for (i = 0; i < 7; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[5], tt, tt);
+ for (i = 0; i < 10; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[14], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[3], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[23], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[13], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[24], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[7], tt, tt);
+ for (i = 0; i < 8; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[12], tt, tt);
+ for (i = 0; i < 8; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[30], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[1], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[30], tt, tt);
+ for (i = 0; i < 7; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[21], tt, tt);
+ for (i = 0; i < 9; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[2], tt, tt);
+ for (i = 0; i < 9; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[19], tt, tt);
+ for (i = 0; i < 9; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[1], tt, tt);
+ for (i = 0; i < 7; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[24], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[26], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[16], tt, tt);
+ for (i = 0; i < 7; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[10], tt, tt);
+ for (i = 0; i < 7; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[6], tt, tt);
+ for (i = 0; i < 7; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[0], tt, tt);
+ for (i = 0; i < 9; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[20], tt, tt);
+ for (i = 0; i < 8; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[9], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[25], tt, tt);
+ for (i = 0; i < 9; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[30], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[26], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(a, tt, tt);
+ for (i = 0; i < 7; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[28], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[6], tt, tt);
+ for (i = 0; i < 6; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[10], tt, tt);
+ for (i = 0; i < 9; i++) {
+ fpsqr_mont(tt, tt);
+ }
+ fpmul_mont(t[22], tt, tt);
+ for (j = 0; j < 35; j++) {
+ for (i = 0; i < 6; i++) fpsqr_mont(tt, tt);
+ fpmul_mont(t[30], tt, tt);
+ }
+ fpcopy(tt, a);
+}
+
+/* Field inversion using Montgomery arithmetic, a = a^(-1)*R mod p. */
+static void fpinv_mont(felm_t a)
+{
+ felm_t tt;
+
+ fpcopy(a, tt);
+ fpinv_chain_mont(tt);
+ fpsqr_mont(tt, tt);
+ fpsqr_mont(tt, tt);
+ fpmul_mont(a, tt, a);
+}
+
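Inversion here is Fermat's little theorem specialised to p ≡ 3 (mod 4): a^(-1) = a^(p-2) mod p, and since p - 2 = 4*((p-3)/4) + 1, the routine computes

    fpinv_mont(a) = (a^((p-3)/4))^4 * a = a^(p-2) = a^(-1)

(in the Montgomery domain, so the stored result is a^(-1)*R mod p, as noted above). fpinv_chain_mont supplies a fixed addition chain for the exponent (p-3)/4, which keeps the whole inversion constant-time. A toy check of the exponent identity with p = 19, where (p-3)/4 = 4: squaring a^4 twice gives a^16, and a^16 * a = a^17 = a^(p-2); for a = 2, 2^17 mod 19 = 10 and 2*10 = 20 ≡ 1 (mod 19).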
+/* GF(p^2) inversion using Montgomery arithmetic, a = (a0-i*a1)/(a0^2+a1^2). */
+void fp2inv_mont(f2elm_t *a)
+{
+ f2elm_t t1;
+
+ fpsqr_mont(a->e[0], t1.e[0]); /* t10 = a0^2 */
+ fpsqr_mont(a->e[1], t1.e[1]); /* t11 = a1^2 */
+ fpadd434(t1.e[0], t1.e[1], t1.e[0]); /* t10 = a0^2+a1^2 */
+ fpinv_mont(t1.e[0]); /* t10 = (a0^2+a1^2)^-1 */
+ fpneg434(a->e[1]); /* a = a0-i*a1 */
+ fpmul_mont(a->e[0], t1.e[0], a->e[0]);
+ fpmul_mont(a->e[1], t1.e[0], a->e[1]); /* a = (a0-i*a1)*(a0^2+a1^2)^-1 */
+}
+
+/* Conversion of a GF(p^2) element to Montgomery representation,
+ * mc_i = a_i*R^2*R^(-1) = a_i*R in GF(p^2). */
+static void to_fp2mont(const f2elm_t *a, f2elm_t *mc)
+{
+ to_mont(a->e[0], mc->e[0]);
+ to_mont(a->e[1], mc->e[1]);
+}
+
+/* Conversion of a GF(p^2) element from Montgomery representation to standard representation,
+ * c_i = ma_i*R^(-1) = a_i in GF(p^2). */
+static void from_fp2mont(const f2elm_t *ma, f2elm_t *c)
+{
+ from_mont(ma->e[0], c->e[0]);
+ from_mont(ma->e[1], c->e[1]);
+}
+
+/* Multiprecision right shift by one. */
+void mp_shiftr1(digit_t* x, const unsigned int nwords)
+{
+ unsigned int i;
+
+ for (i = 0; i < nwords-1; i++) {
+ S2N_SIKE_P434_R3_SHIFTR(x[i+1], x[i], 1, x[i], S2N_SIKE_P434_R3_RADIX);
+ }
+ x[nwords-1] >>= 1;
+}
+
+void decode_to_digits(const unsigned char* x, digit_t* dec, int nbytes, int ndigits)
+{
+ dec[ndigits - 1] = 0;
+ memcpy((unsigned char*)dec, x, nbytes);
+
+ if (is_big_endian()) {
+ for (int i = 0; i < ndigits; i++) {
+ dec[i] = S2N_SIKE_P434_R3_BSWAP_DIGIT(dec[i]);
+ }
+ }
+}
+
+void fpcopy(const felm_t a, felm_t c)
+{
+ unsigned int i;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ c[i] = a[i];
+ }
+}
+
+void fpzero(felm_t a)
+{
+ unsigned int i;
+
+ for (i = 0; i < S2N_SIKE_P434_R3_NWORDS_FIELD; i++) {
+ a[i] = 0;
+ }
+}
+
+void fp2add(const f2elm_t *a, const f2elm_t *b, f2elm_t *c)
+{
+ fpadd434(a->e[0], b->e[0], c->e[0]);
+ fpadd434(a->e[1], b->e[1], c->e[1]);
+}
+
+void fp2sub(const f2elm_t *a, const f2elm_t *b, f2elm_t *c)
+{
+ fpsub434(a->e[0], b->e[0], c->e[0]);
+ fpsub434(a->e[1], b->e[1], c->e[1]);
+}
+
+void mp_addfast(const digit_t* a, const digit_t* b, digit_t* c)
+{
+#if defined(S2N_SIKE_P434_R3_ASM)
+ if (s2n_sikep434r3_asm_is_enabled()) {
+ mp_add434_asm(a, b, c);
+ return;
+ }
+#endif
+
+ mp_add(a, b, c, S2N_SIKE_P434_R3_NWORDS_FIELD);
+}
+
+void mp2_add(const f2elm_t *a, const f2elm_t *b, f2elm_t *c)
+{
+ mp_addfast(a->e[0], b->e[0], c->e[0]);
+ mp_addfast(a->e[1], b->e[1], c->e[1]);
+}
+
+void mp2_sub_p2(const f2elm_t *a, const f2elm_t *b, f2elm_t *c)
+{
+ mp_sub434_p2(a->e[0], b->e[0], c->e[0]);
+ mp_sub434_p2(a->e[1], b->e[1], c->e[1]);
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.h b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.h
new file mode 100644
index 0000000000..bce1849ce1
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_fpx.h
@@ -0,0 +1,65 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: core functions over GF(p) and GF(p^2)
+*********************************************************************************************/
+
+#pragma once
+
+#include <string.h>
+#include "sikep434r3.h"
+#include "sikep434r3_fp.h"
+
+#define fp2_encode S2N_SIKE_P434_R3_NAMESPACE(fp2_encode)
+void fp2_encode(const f2elm_t *x, unsigned char *enc);
+
+#define fp2_decode S2N_SIKE_P434_R3_NAMESPACE(fp2_decode)
+void fp2_decode(const unsigned char *x, f2elm_t *dec);
+
+#define copy_words S2N_SIKE_P434_R3_NAMESPACE(copy_words)
+void copy_words(const digit_t* a, digit_t* c, const unsigned int nwords);
+
+#define fp2copy S2N_SIKE_P434_R3_NAMESPACE(fp2copy)
+void fp2copy(const f2elm_t *a, f2elm_t *c);
+
+#define fp2div2 S2N_SIKE_P434_R3_NAMESPACE(fp2div2)
+void fp2div2(const f2elm_t *a, f2elm_t *c);
+
+#define mp_add S2N_SIKE_P434_R3_NAMESPACE(mp_add)
+unsigned int mp_add(const digit_t* a, const digit_t* b, digit_t* c, const unsigned int nwords);
+
+#define fp2sqr_mont S2N_SIKE_P434_R3_NAMESPACE(fp2sqr_mont)
+void fp2sqr_mont(const f2elm_t *a, f2elm_t *c);
+
+#define fp2mul_mont S2N_SIKE_P434_R3_NAMESPACE(fp2mul_mont)
+void fp2mul_mont(const f2elm_t *a, const f2elm_t *b, f2elm_t *c);
+
+#define fp2inv_mont S2N_SIKE_P434_R3_NAMESPACE(fp2inv_mont)
+void fp2inv_mont(f2elm_t *a);
+
+#define mp_shiftr1 S2N_SIKE_P434_R3_NAMESPACE(mp_shiftr1)
+void mp_shiftr1(digit_t* x, const unsigned int nwords);
+
+#define decode_to_digits S2N_SIKE_P434_R3_NAMESPACE(decode_to_digits)
+void decode_to_digits(const unsigned char* x, digit_t* dec, int nbytes, int ndigits);
+
+#define fpcopy S2N_SIKE_P434_R3_NAMESPACE(fpcopy)
+void fpcopy(const felm_t a, felm_t c);
+
+#define fpzero S2N_SIKE_P434_R3_NAMESPACE(fpzero)
+void fpzero(felm_t a);
+
+#define fp2add S2N_SIKE_P434_R3_NAMESPACE(fp2add)
+void fp2add(const f2elm_t *a, const f2elm_t *b, f2elm_t *c);
+
+#define fp2sub S2N_SIKE_P434_R3_NAMESPACE(fp2sub)
+void fp2sub(const f2elm_t *a, const f2elm_t *b, f2elm_t *c);
+
+#define mp_addfast S2N_SIKE_P434_R3_NAMESPACE(mp_addfast)
+void mp_addfast(const digit_t* a, const digit_t* b, digit_t* c);
+
+#define mp2_add S2N_SIKE_P434_R3_NAMESPACE(mp2_add)
+void mp2_add(const f2elm_t *a, const f2elm_t *b, f2elm_t *c);
+
+#define mp2_sub_p2 S2N_SIKE_P434_R3_NAMESPACE(mp2_sub_p2)
+void mp2_sub_p2(const f2elm_t *a, const f2elm_t *b, f2elm_t *c);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_kem.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_kem.c
new file mode 100644
index 0000000000..b32add7723
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_kem.c
@@ -0,0 +1,112 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: supersingular isogeny key encapsulation (SIKE) protocol
+*********************************************************************************************/
+
+#include <string.h>
+#include "sikep434r3.h"
+#include "sikep434r3_fips202.h"
+#include "utils/s2n_safety.h"
+#include "tls/s2n_kem.h"
+#include "pq-crypto/s2n_pq.h"
+#include "pq-crypto/s2n_pq_random.h"
+#include "sikep434r3_fpx.h"
+#include "sikep434r3_api.h"
+
+/* SIKE's key generation
+ * Outputs: secret key sk (S2N_SIKE_P434_R3_SECRET_KEY_BYTES = S2N_SIKE_P434_R3_MSG_BYTES + S2N_SIKE_P434_R3_SECRETKEY_B_BYTES + S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES bytes)
+ * public key pk (S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES bytes) */
+int s2n_sike_p434_r3_crypto_kem_keypair(unsigned char *pk, unsigned char *sk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+
+ /* Generate lower portion of secret key sk <- s||SK */
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(sk, S2N_SIKE_P434_R3_MSG_BYTES));
+ POSIX_GUARD(random_mod_order_B(sk + S2N_SIKE_P434_R3_MSG_BYTES));
+
+ /* Generate public key pk */
+ EphemeralKeyGeneration_B(sk + S2N_SIKE_P434_R3_MSG_BYTES, pk);
+
+ /* Append public key pk to secret key sk */
+ memcpy(&sk[S2N_SIKE_P434_R3_MSG_BYTES + S2N_SIKE_P434_R3_SECRETKEY_B_BYTES], pk, S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES);
+
+ return S2N_SUCCESS;
+}
+
+/* SIKE's encapsulation
+ * Input: public key pk (S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES bytes)
+ * Outputs: shared secret ss (S2N_SIKE_P434_R3_SHARED_SECRET_BYTES bytes)
+ * ciphertext message ct (S2N_SIKE_P434_R3_CIPHERTEXT_BYTES = S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES + S2N_SIKE_P434_R3_MSG_BYTES bytes) */
+int s2n_sike_p434_r3_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsigned char *pk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+
+ unsigned char ephemeralsk[S2N_SIKE_P434_R3_SECRETKEY_A_BYTES];
+ unsigned char jinvariant[S2N_SIKE_P434_R3_FP2_ENCODED_BYTES];
+ unsigned char h[S2N_SIKE_P434_R3_MSG_BYTES];
+ unsigned char temp[S2N_SIKE_P434_R3_CIPHERTEXT_BYTES+S2N_SIKE_P434_R3_MSG_BYTES];
+
+ /* Generate ephemeralsk <- G(m||pk) mod oA */
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(temp, S2N_SIKE_P434_R3_MSG_BYTES));
+ memcpy(&temp[S2N_SIKE_P434_R3_MSG_BYTES], pk, S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES);
+ shake256(ephemeralsk, S2N_SIKE_P434_R3_SECRETKEY_A_BYTES, temp, S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES+S2N_SIKE_P434_R3_MSG_BYTES);
+ ephemeralsk[S2N_SIKE_P434_R3_SECRETKEY_A_BYTES - 1] &= S2N_SIKE_P434_R3_MASK_ALICE;
+
+ /* Encrypt */
+ EphemeralKeyGeneration_A(ephemeralsk, ct);
+ EphemeralSecretAgreement_A(ephemeralsk, pk, jinvariant);
+ shake256(h, S2N_SIKE_P434_R3_MSG_BYTES, jinvariant, S2N_SIKE_P434_R3_FP2_ENCODED_BYTES);
+ for (int i = 0; i < S2N_SIKE_P434_R3_MSG_BYTES; i++) {
+ ct[i + S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES] = temp[i] ^ h[i];
+ }
+
+ /* Generate shared secret ss <- H(m||ct) */
+ memcpy(&temp[S2N_SIKE_P434_R3_MSG_BYTES], ct, S2N_SIKE_P434_R3_CIPHERTEXT_BYTES);
+ shake256(ss, S2N_SIKE_P434_R3_SHARED_SECRET_BYTES, temp, S2N_SIKE_P434_R3_CIPHERTEXT_BYTES+S2N_SIKE_P434_R3_MSG_BYTES);
+
+ return S2N_SUCCESS;
+}
+
+/* SIKE's decapsulation
+ * Input: secret key sk (S2N_SIKE_P434_R3_SECRET_KEY_BYTES = S2N_SIKE_P434_R3_MSG_BYTES + S2N_SIKE_P434_R3_SECRETKEY_B_BYTES + S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES bytes)
+ * ciphertext message ct (S2N_SIKE_P434_R3_CIPHERTEXT_BYTES = S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES + S2N_SIKE_P434_R3_MSG_BYTES bytes)
+ * Outputs: shared secret ss (S2N_SIKE_P434_R3_SHARED_SECRET_BYTES bytes) */
+int s2n_sike_p434_r3_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, const unsigned char *sk)
+{
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+
+ unsigned char ephemeralsk_[S2N_SIKE_P434_R3_SECRETKEY_A_BYTES];
+ unsigned char jinvariant_[S2N_SIKE_P434_R3_FP2_ENCODED_BYTES];
+ unsigned char h_[S2N_SIKE_P434_R3_MSG_BYTES];
+ unsigned char c0_[S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES];
+ unsigned char temp[S2N_SIKE_P434_R3_CIPHERTEXT_BYTES+S2N_SIKE_P434_R3_MSG_BYTES];
+
+ /* Decrypt */
+ EphemeralSecretAgreement_B(sk + S2N_SIKE_P434_R3_MSG_BYTES, ct, jinvariant_);
+ shake256(h_, S2N_SIKE_P434_R3_MSG_BYTES, jinvariant_, S2N_SIKE_P434_R3_FP2_ENCODED_BYTES);
+ for (int i = 0; i < S2N_SIKE_P434_R3_MSG_BYTES; i++) {
+ temp[i] = ct[i + S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES] ^ h_[i];
+ }
+
+ /* Generate ephemeralsk_ <- G(m||pk) mod oA */
+ memcpy(&temp[S2N_SIKE_P434_R3_MSG_BYTES], &sk[S2N_SIKE_P434_R3_MSG_BYTES + S2N_SIKE_P434_R3_SECRETKEY_B_BYTES], S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES);
+ shake256(ephemeralsk_, S2N_SIKE_P434_R3_SECRETKEY_A_BYTES, temp, S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES+S2N_SIKE_P434_R3_MSG_BYTES);
+ ephemeralsk_[S2N_SIKE_P434_R3_SECRETKEY_A_BYTES - 1] &= S2N_SIKE_P434_R3_MASK_ALICE;
+
+ /* Generate shared secret ss <- H(m||ct), or output ss <- H(s||ct) in case of ct verification failure */
+ EphemeralKeyGeneration_A(ephemeralsk_, c0_);
+
+ /* Verify ciphertext.
+ * If c0_ and ct are NOT equal, decaps failed and we overwrite the shared secret
+ * with pseudorandom noise (ss = H(s||ct)) by performing the copy (dont_copy = false).
+ *
+ * If c0_ and ct are equal, then decaps succeeded and we skip the overwrite and output
+ * the actual shared secret: ss = H(m||ct) (dont_copy = true). */
+ bool dont_copy = s2n_constant_time_equals(c0_, ct, S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES);
+ POSIX_GUARD(s2n_constant_time_copy_or_dont(temp, sk, S2N_SIKE_P434_R3_MSG_BYTES, dont_copy));
+ memcpy(&temp[S2N_SIKE_P434_R3_MSG_BYTES], ct, S2N_SIKE_P434_R3_CIPHERTEXT_BYTES);
+ shake256(ss, S2N_SIKE_P434_R3_SHARED_SECRET_BYTES, temp, S2N_SIKE_P434_R3_CIPHERTEXT_BYTES+S2N_SIKE_P434_R3_MSG_BYTES);
+
+ return S2N_SUCCESS;
+}
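Decapsulation follows the Fujisaki-Okamoto transform with implicit rejection: instead of branching on whether the re-encrypted ciphertext matches, the code selects between m and the stored secret s with a constant-time copy, so a caller submitting malformed ciphertexts learns nothing from timing or from the returned "shared secret". s2n_constant_time_equals and s2n_constant_time_copy_or_dont are declared in utils/s2n_safety.h (included above); a mask-based sketch of what such a copy-or-don't primitive typically looks like (illustrative only, not s2n's implementation):

#include <stddef.h>
#include <stdint.h>

/* If dont == 0, copy src into dst; otherwise leave dst unchanged.
 * Every byte is touched either way, so the memory access pattern does not depend on dont. */
static void copy_or_dont(uint8_t *dst, const uint8_t *src, size_t len, uint8_t dont)
{
    uint8_t nonzero = (uint8_t)((dont | (uint8_t)-dont) >> 7); /* 1 if dont != 0, else 0 */
    uint8_t mask = (uint8_t)(nonzero - 1);                     /* 0x00 if dont != 0, else 0xFF */
    for (size_t i = 0; i < len; i++) {
        dst[i] = (uint8_t)((src[i] & mask) | (dst[i] & (uint8_t)~mask));
    }
}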
diff --git a/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_sidh.c b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_sidh.c
new file mode 100644
index 0000000000..f570e27e32
--- /dev/null
+++ b/contrib/restricted/aws/s2n/pq-crypto/sike_r3/sikep434r3_sidh.c
@@ -0,0 +1,310 @@
+/********************************************************************************************
+* Supersingular Isogeny Key Encapsulation Library
+*
+* Abstract: ephemeral supersingular isogeny Diffie-Hellman key exchange (SIDH)
+*********************************************************************************************/
+
+#include "sikep434r3.h"
+#include "pq-crypto/s2n_pq_random.h"
+#include "utils/s2n_safety.h"
+#include "sikep434r3_fpx.h"
+#include "sikep434r3_ec_isogeny.h"
+#include "sikep434r3_api.h"
+
+/* Initialization of basis points */
+static void init_basis(const digit_t *gen, f2elm_t *XP, f2elm_t *XQ, f2elm_t *XR)
+{
+ fpcopy(gen, XP->e[0]);
+ fpcopy(gen + S2N_SIKE_P434_R3_NWORDS_FIELD, XP->e[1]);
+ fpcopy(gen + 2*S2N_SIKE_P434_R3_NWORDS_FIELD, XQ->e[0]);
+ fpcopy(gen + 3*S2N_SIKE_P434_R3_NWORDS_FIELD, XQ->e[1]);
+ fpcopy(gen + 4*S2N_SIKE_P434_R3_NWORDS_FIELD, XR->e[0]);
+ fpcopy(gen + 5*S2N_SIKE_P434_R3_NWORDS_FIELD, XR->e[1]);
+}
+
+/* Generation of Bob's secret key
+ * Outputs random value in [0, 2^Floor(Log(2, oB)) - 1] */
+int random_mod_order_B(unsigned char* random_digits)
+{
+ POSIX_GUARD_RESULT(s2n_get_random_bytes(random_digits, S2N_SIKE_P434_R3_SECRETKEY_B_BYTES));
+ random_digits[S2N_SIKE_P434_R3_SECRETKEY_B_BYTES-1] &= S2N_SIKE_P434_R3_MASK_BOB; /* Masking last byte */
+
+ return 0;
+}
+
+/* Alice's ephemeral public key generation
+ * Input: a private key PrivateKeyA in the range [0, 2^eA - 1].
+ * Output: the public key PublicKeyA consisting of 3 elements in GF(p^2) which are encoded
+ * by removing leading 0 bytes. */
+int EphemeralKeyGeneration_A(const unsigned char* PrivateKeyA, unsigned char* PublicKeyA)
+{
+ point_proj_t R, phiP = {0}, phiQ = {0}, phiR = {0}, pts[S2N_SIKE_P434_R3_MAX_INT_POINTS_ALICE];
+ f2elm_t _XPA, _XQA, _XRA, coeff[3], _A24plus = {0}, _C24 = {0}, _A = {0};
+ f2elm_t *XPA=&_XPA, *XQA=&_XQA, *XRA=&_XRA, *A24plus=&_A24plus, *C24=&_C24, *A=&_A;
+ unsigned int i, row, m, tree_index = 0, pts_index[S2N_SIKE_P434_R3_MAX_INT_POINTS_ALICE], npts = 0, ii = 0;
+ digit_t SecretKeyA[S2N_SIKE_P434_R3_NWORDS_ORDER] = {0};
+
+ /* Initialize basis points */
+ init_basis((const digit_t*)A_gen, XPA, XQA, XRA);
+ init_basis((const digit_t*)B_gen, &phiP->X, &phiQ->X, &phiR->X);
+ fpcopy((const digit_t*)&Montgomery_one, (phiP->Z.e)[0]);
+ fpcopy((const digit_t*)&Montgomery_one, (phiQ->Z.e)[0]);
+ fpcopy((const digit_t*)&Montgomery_one, (phiR->Z.e)[0]);
+
+ /* Initialize constants: A24plus = A+2C, C24 = 4C, where A=6, C=1 */
+ fpcopy((const digit_t*)&Montgomery_one, A24plus->e[0]);
+ mp2_add(A24plus, A24plus, A24plus);
+ mp2_add(A24plus, A24plus, C24);
+ mp2_add(A24plus, C24, A);
+ mp2_add(C24, C24, A24plus);
+
+ /* Retrieve kernel point */
+ decode_to_digits(PrivateKeyA, SecretKeyA, S2N_SIKE_P434_R3_SECRETKEY_A_BYTES, S2N_SIKE_P434_R3_NWORDS_ORDER);
+ LADDER3PT(XPA, XQA, XRA, SecretKeyA, S2N_SIKE_P434_R3_ALICE, R, A);
+
+ /* Traverse tree */
+ tree_index = 0;
+ for (row = 1; row < S2N_SIKE_P434_R3_MAX_ALICE; row++) {
+ while (tree_index < S2N_SIKE_P434_R3_MAX_ALICE-row) {
+ fp2copy(&R->X, &pts[npts]->X);
+ fp2copy(&R->Z, &pts[npts]->Z);
+ pts_index[npts++] = tree_index;
+ m = strat_Alice[ii++];
+ xDBLe(R, R, A24plus, C24, (int)(2*m));
+ tree_index += m;
+ }
+ get_4_isog(R, A24plus, C24, coeff);
+
+ for (i = 0; i < npts; i++) {
+ eval_4_isog(pts[i], coeff);
+ }
+ eval_4_isog(phiP, coeff);
+ eval_4_isog(phiQ, coeff);
+ eval_4_isog(phiR, coeff);
+
+ fp2copy(&pts[npts-1]->X, &R->X);
+ fp2copy(&pts[npts-1]->Z, &R->Z);
+ tree_index = pts_index[npts-1];
+ npts -= 1;
+ }
+
+ get_4_isog(R, A24plus, C24, coeff);
+ eval_4_isog(phiP, coeff);
+ eval_4_isog(phiQ, coeff);
+ eval_4_isog(phiR, coeff);
+
+ inv_3_way(&phiP->Z, &phiQ->Z, &phiR->Z);
+ fp2mul_mont(&phiP->X, &phiP->Z, &phiP->X);
+ fp2mul_mont(&phiQ->X, &phiQ->Z, &phiQ->X);
+ fp2mul_mont(&phiR->X, &phiR->Z, &phiR->X);
+
+ /* Format public key */
+ fp2_encode(&phiP->X, PublicKeyA);
+ fp2_encode(&phiQ->X, PublicKeyA + S2N_SIKE_P434_R3_FP2_ENCODED_BYTES);
+ fp2_encode(&phiR->X, PublicKeyA + 2*S2N_SIKE_P434_R3_FP2_ENCODED_BYTES);
+
+ return 0;
+}
+
+/* Bob's ephemeral public key generation
+ * Input: a private key PrivateKeyB in the range [0, 2^Floor(Log(2,oB)) - 1].
+ * Output: the public key PublicKeyB consisting of 3 elements in GF(p^2) which are encoded
+ * by removing leading 0 bytes. */
+int EphemeralKeyGeneration_B(const unsigned char* PrivateKeyB, unsigned char* PublicKeyB)
+{
+ point_proj_t R, phiP = {0}, phiQ = {0}, phiR = {0}, pts[S2N_SIKE_P434_R3_MAX_INT_POINTS_BOB];
+ f2elm_t _XPB, _XQB, _XRB, coeff[3], _A24plus = {0}, _A24minus = {0}, _A = {0};
+ f2elm_t *XPB=&_XPB, *XQB=&_XQB, *XRB=&_XRB, *A24plus=&_A24plus, *A24minus=&_A24minus, *A=&_A;
+
+ unsigned int i, row, m, tree_index = 0, pts_index[S2N_SIKE_P434_R3_MAX_INT_POINTS_BOB], npts = 0, ii = 0;
+ digit_t SecretKeyB[S2N_SIKE_P434_R3_NWORDS_ORDER] = {0};
+
+ /* Initialize basis points */
+ init_basis((const digit_t*)B_gen, XPB, XQB, XRB);
+ init_basis((const digit_t*)A_gen, &phiP->X, &phiQ->X, &phiR->X);
+ fpcopy((const digit_t*)&Montgomery_one, (phiP->Z.e)[0]);
+ fpcopy((const digit_t*)&Montgomery_one, (phiQ->Z.e)[0]);
+ fpcopy((const digit_t*)&Montgomery_one, (phiR->Z.e)[0]);
+
+ /* Initialize constants: A24minus = A-2C, A24plus = A+2C, where A=6, C=1 */
+ fpcopy((const digit_t*)&Montgomery_one, A24plus->e[0]);
+ mp2_add(A24plus, A24plus, A24plus);
+ mp2_add(A24plus, A24plus, A24minus);
+ mp2_add(A24plus, A24minus, A);
+ mp2_add(A24minus, A24minus, A24plus);
+
+ /* Retrieve kernel point */
+ decode_to_digits(PrivateKeyB, SecretKeyB, S2N_SIKE_P434_R3_SECRETKEY_B_BYTES, S2N_SIKE_P434_R3_NWORDS_ORDER);
+ LADDER3PT(XPB, XQB, XRB, SecretKeyB, S2N_SIKE_P434_R3_BOB, R, A);
+
+ /* Traverse tree */
+ tree_index = 0;
+ for (row = 1; row < S2N_SIKE_P434_R3_MAX_BOB; row++) {
+ while (tree_index < S2N_SIKE_P434_R3_MAX_BOB-row) {
+ fp2copy(&R->X, &pts[npts]->X);
+ fp2copy(&R->Z, &pts[npts]->Z);
+ pts_index[npts++] = tree_index;
+ m = strat_Bob[ii++];
+ xTPLe(R, R, A24minus, A24plus, (int)m);
+ tree_index += m;
+ }
+ get_3_isog(R, A24minus, A24plus, coeff);
+
+ for (i = 0; i < npts; i++) {
+ eval_3_isog(pts[i], coeff);
+ }
+ eval_3_isog(phiP, coeff);
+ eval_3_isog(phiQ, coeff);
+ eval_3_isog(phiR, coeff);
+
+ fp2copy(&pts[npts-1]->X, &R->X);
+ fp2copy(&pts[npts-1]->Z, &R->Z);
+ tree_index = pts_index[npts-1];
+ npts -= 1;
+ }
+
+ get_3_isog(R, A24minus, A24plus, coeff);
+ eval_3_isog(phiP, coeff);
+ eval_3_isog(phiQ, coeff);
+ eval_3_isog(phiR, coeff);
+
+ inv_3_way(&phiP->Z, &phiQ->Z, &phiR->Z);
+ fp2mul_mont(&phiP->X, &phiP->Z, &phiP->X);
+ fp2mul_mont(&phiQ->X, &phiQ->Z, &phiQ->X);
+ fp2mul_mont(&phiR->X, &phiR->Z, &phiR->X);
+
+ /* Format public key */
+ fp2_encode(&phiP->X, PublicKeyB);
+ fp2_encode(&phiQ->X, PublicKeyB + S2N_SIKE_P434_R3_FP2_ENCODED_BYTES);
+ fp2_encode(&phiR->X, PublicKeyB + 2*S2N_SIKE_P434_R3_FP2_ENCODED_BYTES);
+
+ return 0;
+}
+
+/* Alice's ephemeral shared secret computation
+ * It produces a shared secret key SharedSecretA using her secret key PrivateKeyA and Bob's public key PublicKeyB
+ * Inputs: Alice's PrivateKeyA is an integer in the range [0, oA-1].
+ * Bob's PublicKeyB consists of 3 elements in GF(p^2) encoded by removing leading 0 bytes.
+ * Output: a shared secret SharedSecretA that consists of one element in GF(p^2) encoded
+ * by removing leading 0 bytes. */
+int EphemeralSecretAgreement_A(const unsigned char* PrivateKeyA, const unsigned char* PublicKeyB,
+ unsigned char* SharedSecretA)
+{
+ point_proj_t R, pts[S2N_SIKE_P434_R3_MAX_INT_POINTS_ALICE];
+ f2elm_t coeff[3], PKB[3], _jinv;
+ f2elm_t _A24plus = {0}, _C24 = {0}, _A = {0};
+ f2elm_t *jinv=&_jinv, *A24plus=&_A24plus, *C24=&_C24, *A=&_A;
+ unsigned int i, row, m, tree_index = 0, pts_index[S2N_SIKE_P434_R3_MAX_INT_POINTS_ALICE], npts = 0, ii = 0;
+ digit_t SecretKeyA[S2N_SIKE_P434_R3_NWORDS_ORDER] = {0};
+
+ /* Initialize images of Bob's basis */
+ fp2_decode(PublicKeyB, &PKB[0]);
+ fp2_decode(PublicKeyB + S2N_SIKE_P434_R3_FP2_ENCODED_BYTES, &PKB[1]);
+ fp2_decode(PublicKeyB + 2*S2N_SIKE_P434_R3_FP2_ENCODED_BYTES, &PKB[2]);
+
+ /* Initialize constants: A24plus = A+2C, C24 = 4C, where C=1 */
+ get_A(&PKB[0], &PKB[1], &PKB[2], A);
+ mp_add((const digit_t*)&Montgomery_one, (const digit_t*)&Montgomery_one, C24->e[0], S2N_SIKE_P434_R3_NWORDS_FIELD);
+ mp2_add(A, C24, A24plus);
+ mp_add(C24->e[0], C24->e[0], C24->e[0], S2N_SIKE_P434_R3_NWORDS_FIELD);
+
+ /* Retrieve kernel point */
+ decode_to_digits(PrivateKeyA, SecretKeyA, S2N_SIKE_P434_R3_SECRETKEY_A_BYTES, S2N_SIKE_P434_R3_NWORDS_ORDER);
+ LADDER3PT(&PKB[0], &PKB[1], &PKB[2], SecretKeyA, S2N_SIKE_P434_R3_ALICE, R, A);
+
+ /* Traverse tree */
+ tree_index = 0;
+ for (row = 1; row < S2N_SIKE_P434_R3_MAX_ALICE; row++) {
+ while (tree_index < S2N_SIKE_P434_R3_MAX_ALICE-row) {
+ fp2copy(&R->X, &pts[npts]->X);
+ fp2copy(&R->Z, &pts[npts]->Z);
+ pts_index[npts++] = tree_index;
+ m = strat_Alice[ii++];
+ xDBLe(R, R, A24plus, C24, (int)(2*m));
+ tree_index += m;
+ }
+ get_4_isog(R, A24plus, C24, coeff);
+
+ for (i = 0; i < npts; i++) {
+ eval_4_isog(pts[i], coeff);
+ }
+
+ fp2copy(&pts[npts-1]->X, &R->X);
+ fp2copy(&pts[npts-1]->Z, &R->Z);
+ tree_index = pts_index[npts-1];
+ npts -= 1;
+ }
+
+ get_4_isog(R, A24plus, C24, coeff);
+ mp2_add(A24plus, A24plus, A24plus);
+ fp2sub(A24plus, C24, A24plus);
+ fp2add(A24plus, A24plus, A24plus);
+ j_inv(A24plus, C24, jinv);
+ fp2_encode(jinv, SharedSecretA); /* Format shared secret */
+
+ return 0;
+}
+
+/* Bob's ephemeral shared secret computation
+ * It produces a shared secret key SharedSecretB using his secret key PrivateKeyB and Alice's public key PublicKeyA
+ * Inputs: Bob's PrivateKeyB is an integer in the range [0, 2^Floor(Log(2,oB)) - 1].
+ * Alice's PublicKeyA consists of 3 elements in GF(p^2) encoded by removing leading 0 bytes.
+ * Output: a shared secret SharedSecretB that consists of one element in GF(p^2) encoded
+ * by removing leading 0 bytes. */
+int EphemeralSecretAgreement_B(const unsigned char* PrivateKeyB, const unsigned char* PublicKeyA,
+ unsigned char* SharedSecretB)
+{
+ point_proj_t R, pts[S2N_SIKE_P434_R3_MAX_INT_POINTS_BOB];
+ f2elm_t coeff[3], PKB[3], _jinv;
+ f2elm_t _A24plus = {0}, _A24minus = {0}, _A = {0};
+ f2elm_t *jinv=&_jinv, *A24plus=&_A24plus, *A24minus=&_A24minus, *A=&_A;
+ unsigned int i, row, m, tree_index = 0, pts_index[S2N_SIKE_P434_R3_MAX_INT_POINTS_BOB], npts = 0, ii = 0;
+ digit_t SecretKeyB[S2N_SIKE_P434_R3_NWORDS_ORDER] = {0};
+
+ /* Initialize images of Alice's basis */
+ fp2_decode(PublicKeyA, &PKB[0]);
+ fp2_decode(PublicKeyA + S2N_SIKE_P434_R3_FP2_ENCODED_BYTES, &PKB[1]);
+ fp2_decode(PublicKeyA + 2*S2N_SIKE_P434_R3_FP2_ENCODED_BYTES, &PKB[2]);
+
+ /* Initialize constants: A24plus = A+2C, A24minus = A-2C, where C=1 */
+ get_A(&PKB[0], &PKB[1], &PKB[2], A);
+ mp_add((const digit_t*)&Montgomery_one, (const digit_t*)&Montgomery_one, A24minus->e[0], S2N_SIKE_P434_R3_NWORDS_FIELD);
+ mp2_add(A, A24minus, A24plus);
+ mp2_sub_p2(A, A24minus, A24minus);
+
+ /* Retrieve kernel point */
+ decode_to_digits(PrivateKeyB, SecretKeyB, S2N_SIKE_P434_R3_SECRETKEY_B_BYTES, S2N_SIKE_P434_R3_NWORDS_ORDER);
+ LADDER3PT(&PKB[0], &PKB[1], &PKB[2], SecretKeyB, S2N_SIKE_P434_R3_BOB, R, A);
+
+ /* Traverse tree */
+ tree_index = 0;
+ for (row = 1; row < S2N_SIKE_P434_R3_MAX_BOB; row++) {
+ while (tree_index < S2N_SIKE_P434_R3_MAX_BOB-row) {
+ fp2copy(&R->X, &pts[npts]->X);
+ fp2copy(&R->Z, &pts[npts]->Z);
+ pts_index[npts++] = tree_index;
+ m = strat_Bob[ii++];
+ xTPLe(R, R, A24minus, A24plus, (int)m);
+ tree_index += m;
+ }
+ get_3_isog(R, A24minus, A24plus, coeff);
+
+ for (i = 0; i < npts; i++) {
+ eval_3_isog(pts[i], coeff);
+ }
+
+ fp2copy(&pts[npts-1]->X, &R->X);
+ fp2copy(&pts[npts-1]->Z, &R->Z);
+ tree_index = pts_index[npts-1];
+ npts -= 1;
+ }
+
+ get_3_isog(R, A24minus, A24plus, coeff);
+ fp2add(A24plus, A24minus, A);
+ fp2add(A, A, A);
+ fp2sub(A24plus, A24minus, A24plus);
+ j_inv(A, A24plus, jinv);
+ fp2_encode(jinv, SharedSecretB); /* Format shared secret */
+
+ return 0;
+}
diff --git a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.c b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.c
index 04ced5b229..a2b1024732 100644
--- a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.c
+++ b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.c
@@ -26,16 +26,17 @@
S2N_RESULT s2n_stuffer_validate(const struct s2n_stuffer* stuffer)
{
/**
- * Note that we do not assert any properties on the alloced, growable, and tainted fields,
- * as all possible combinations of boolean values in those fields are valid.
+ * Note that we do not assert any properties on the tainted field,
+ * as any boolean value in that field is valid.
*/
- ENSURE_REF(stuffer);
- GUARD_RESULT(s2n_blob_validate(&stuffer->blob));
+ RESULT_ENSURE_REF(stuffer);
+ RESULT_GUARD(s2n_blob_validate(&stuffer->blob));
+ RESULT_DEBUG_ENSURE(S2N_IMPLIES(stuffer->growable, stuffer->alloced), S2N_ERR_SAFETY);
/* <= is valid because we can have a fully written/read stuffer */
- DEBUG_ENSURE(stuffer->high_water_mark <= stuffer->blob.size, S2N_ERR_SAFETY);
- DEBUG_ENSURE(stuffer->write_cursor <= stuffer->high_water_mark, S2N_ERR_SAFETY);
- DEBUG_ENSURE(stuffer->read_cursor <= stuffer->write_cursor, S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(stuffer->high_water_mark <= stuffer->blob.size, S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(stuffer->write_cursor <= stuffer->high_water_mark, S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(stuffer->read_cursor <= stuffer->write_cursor, S2N_ERR_SAFETY);
return S2N_RESULT_OK;
}
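The invariants asserted here (read_cursor <= write_cursor <= high_water_mark <= blob.size, and growable implying alloced) describe a stuffer's normal life cycle: allocate, write, read, free. A minimal round trip through the public API touched by this diff (a sketch; include paths assumed from the tree layout, error handling delegated to POSIX_GUARD):

#include "stuffer/s2n_stuffer.h"
#include "utils/s2n_safety.h"

static int stuffer_round_trip(void)
{
    struct s2n_stuffer stuffer = { 0 };
    uint8_t in[4] = { 1, 2, 3, 4 };
    uint8_t out[4] = { 0 };

    POSIX_GUARD(s2n_stuffer_growable_alloc(&stuffer, sizeof(in)));
    POSIX_GUARD(s2n_stuffer_write_bytes(&stuffer, in, sizeof(in)));  /* write_cursor == high_water_mark == 4 */
    POSIX_GUARD(s2n_stuffer_read_bytes(&stuffer, out, sizeof(out))); /* read_cursor == 4: fully consumed */
    POSIX_GUARD(s2n_stuffer_free(&stuffer));                         /* zeroes the struct, releases the blob */
    return S2N_SUCCESS;
}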
@@ -46,15 +47,15 @@ S2N_RESULT s2n_stuffer_reservation_validate(const struct s2n_stuffer_reservation
* for CBMC (see https://github.com/awslabs/s2n/issues/2290). We can roll back
* this change once CBMC can handle common subexpression elimination.
*/
- ENSURE_REF(reservation);
+ RESULT_ENSURE_REF(reservation);
const struct s2n_stuffer_reservation reserve_obj = *reservation;
- GUARD_RESULT(s2n_stuffer_validate(reserve_obj.stuffer));
+ RESULT_GUARD(s2n_stuffer_validate(reserve_obj.stuffer));
const struct s2n_stuffer stuffer_obj = *(reserve_obj.stuffer);
- ENSURE(stuffer_obj.blob.size >= reserve_obj.length, S2N_ERR_SAFETY);
+ RESULT_ENSURE(stuffer_obj.blob.size >= reserve_obj.length, S2N_ERR_SAFETY);
if (reserve_obj.length > 0) {
- ENSURE(reserve_obj.write_cursor < stuffer_obj.write_cursor, S2N_ERR_SAFETY);
- ENSURE(
+ RESULT_ENSURE(reserve_obj.write_cursor < stuffer_obj.write_cursor, S2N_ERR_SAFETY);
+ RESULT_ENSURE(
S2N_MEM_IS_WRITABLE(stuffer_obj.blob.data + reserve_obj.write_cursor, reserve_obj.length),
S2N_ERR_SAFETY
);
@@ -65,8 +66,8 @@ S2N_RESULT s2n_stuffer_reservation_validate(const struct s2n_stuffer_reservation
int s2n_stuffer_init(struct s2n_stuffer *stuffer, struct s2n_blob *in)
{
- ENSURE_POSIX_MUT(stuffer);
- PRECONDITION_POSIX(s2n_blob_validate(in));
+ POSIX_ENSURE_MUT(stuffer);
+ POSIX_PRECONDITION(s2n_blob_validate(in));
stuffer->blob = *in;
stuffer->read_cursor = 0;
stuffer->write_cursor = 0;
@@ -74,37 +75,39 @@ int s2n_stuffer_init(struct s2n_stuffer *stuffer, struct s2n_blob *in)
stuffer->alloced = 0;
stuffer->growable = 0;
stuffer->tainted = 0;
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_alloc(struct s2n_stuffer *stuffer, const uint32_t size)
{
- notnull_check(stuffer);
+ POSIX_ENSURE_REF(stuffer);
*stuffer = (struct s2n_stuffer) {0};
- GUARD(s2n_alloc(&stuffer->blob, size));
- GUARD(s2n_stuffer_init(stuffer, &stuffer->blob));
+ POSIX_GUARD(s2n_alloc(&stuffer->blob, size));
+ POSIX_GUARD(s2n_stuffer_init(stuffer, &stuffer->blob));
stuffer->alloced = 1;
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_growable_alloc(struct s2n_stuffer *stuffer, const uint32_t size)
{
- GUARD(s2n_stuffer_alloc(stuffer, size));
+ POSIX_GUARD(s2n_stuffer_alloc(stuffer, size));
stuffer->growable = 1;
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_free(struct s2n_stuffer *stuffer)
{
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
if (stuffer != NULL) {
if (stuffer->alloced) {
- GUARD(s2n_free(&stuffer->blob));
+ POSIX_GUARD(s2n_free(&stuffer->blob));
}
*stuffer = (struct s2n_stuffer) {0};
}
@@ -113,9 +116,9 @@ int s2n_stuffer_free(struct s2n_stuffer *stuffer)
int s2n_stuffer_resize(struct s2n_stuffer *stuffer, const uint32_t size)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- ENSURE_POSIX(!stuffer->tainted, S2N_ERR_RESIZE_TAINTED_STUFFER);
- ENSURE_POSIX(stuffer->growable, S2N_ERR_RESIZE_STATIC_STUFFER);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE(!stuffer->tainted, S2N_ERR_RESIZE_TAINTED_STUFFER);
+ POSIX_ENSURE(stuffer->growable, S2N_ERR_RESIZE_STATIC_STUFFER);
if (size == stuffer->blob.size) {
return S2N_SUCCESS;
@@ -127,68 +130,70 @@ int s2n_stuffer_resize(struct s2n_stuffer *stuffer, const uint32_t size)
}
if (size < stuffer->blob.size) {
- memset_check(stuffer->blob.data + size, S2N_WIPE_PATTERN, (stuffer->blob.size - size));
+ POSIX_CHECKED_MEMSET(stuffer->blob.data + size, S2N_WIPE_PATTERN, (stuffer->blob.size - size));
if (stuffer->read_cursor > size) stuffer->read_cursor = size;
if (stuffer->write_cursor > size) stuffer->write_cursor = size;
if (stuffer->high_water_mark > size) stuffer->high_water_mark = size;
stuffer->blob.size = size;
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
- GUARD(s2n_realloc(&stuffer->blob, size));
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_GUARD(s2n_realloc(&stuffer->blob, size));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_resize_if_empty(struct s2n_stuffer *stuffer, const uint32_t size)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
if (stuffer->blob.data == NULL) {
- ENSURE_POSIX(!stuffer->tainted, S2N_ERR_RESIZE_TAINTED_STUFFER);
- ENSURE_POSIX(stuffer->growable, S2N_ERR_RESIZE_STATIC_STUFFER);
- GUARD(s2n_realloc(&stuffer->blob, size));
+ POSIX_ENSURE(!stuffer->tainted, S2N_ERR_RESIZE_TAINTED_STUFFER);
+ POSIX_ENSURE(stuffer->growable, S2N_ERR_RESIZE_STATIC_STUFFER);
+ POSIX_GUARD(s2n_realloc(&stuffer->blob, size));
}
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_rewrite(struct s2n_stuffer *stuffer)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
stuffer->write_cursor = 0;
stuffer->read_cursor = 0;
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_rewind_read(struct s2n_stuffer *stuffer, const uint32_t size)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- ENSURE_POSIX(stuffer->read_cursor >= size, S2N_ERR_STUFFER_OUT_OF_DATA);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE(stuffer->read_cursor >= size, S2N_ERR_STUFFER_OUT_OF_DATA);
stuffer->read_cursor -= size;
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_reread(struct s2n_stuffer *stuffer)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
stuffer->read_cursor = 0;
return S2N_SUCCESS;
}
int s2n_stuffer_wipe_n(struct s2n_stuffer *stuffer, const uint32_t size)
{
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
if (size >= stuffer->write_cursor) {
return s2n_stuffer_wipe(stuffer);
}
/* We know that size is now less than write_cursor */
stuffer->write_cursor -= size;
- memset_check(stuffer->blob.data + stuffer->write_cursor, S2N_WIPE_PATTERN, size);
+ POSIX_CHECKED_MEMSET(stuffer->blob.data + stuffer->write_cursor, S2N_WIPE_PATTERN, size);
stuffer->read_cursor = MIN(stuffer->read_cursor, stuffer->write_cursor);
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
@@ -198,21 +203,23 @@ bool s2n_stuffer_is_consumed(struct s2n_stuffer *stuffer) {
int s2n_stuffer_wipe(struct s2n_stuffer *stuffer)
{
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
if (!s2n_stuffer_is_wiped(stuffer)) {
- memset_check(stuffer->blob.data, S2N_WIPE_PATTERN, stuffer->high_water_mark);
+ POSIX_CHECKED_MEMSET(stuffer->blob.data, S2N_WIPE_PATTERN, stuffer->high_water_mark);
}
stuffer->tainted = 0;
stuffer->write_cursor = 0;
stuffer->read_cursor = 0;
stuffer->high_water_mark = 0;
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_skip_read(struct s2n_stuffer *stuffer, uint32_t n)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- S2N_ERROR_IF(s2n_stuffer_data_available(stuffer) < n, S2N_ERR_STUFFER_OUT_OF_DATA);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE(s2n_stuffer_data_available(stuffer) >= n, S2N_ERR_STUFFER_OUT_OF_DATA);
stuffer->read_cursor += n;
return S2N_SUCCESS;
@@ -220,124 +227,124 @@ int s2n_stuffer_skip_read(struct s2n_stuffer *stuffer, uint32_t n)
void *s2n_stuffer_raw_read(struct s2n_stuffer *stuffer, uint32_t data_len)
{
- GUARD_PTR(s2n_stuffer_skip_read(stuffer, data_len));
+ PTR_GUARD_POSIX(s2n_stuffer_skip_read(stuffer, data_len));
stuffer->tainted = 1;
- return stuffer->blob.data + stuffer->read_cursor - data_len;
+ return (stuffer->blob.data) ? (stuffer->blob.data + stuffer->read_cursor - data_len) : NULL;
}
int s2n_stuffer_read(struct s2n_stuffer *stuffer, struct s2n_blob *out)
{
- notnull_check(out);
+ POSIX_ENSURE_REF(out);
return s2n_stuffer_read_bytes(stuffer, out->data, out->size);
}
int s2n_stuffer_erase_and_read(struct s2n_stuffer *stuffer, struct s2n_blob *out)
{
- GUARD(s2n_stuffer_skip_read(stuffer, out->size));
+ POSIX_GUARD(s2n_stuffer_skip_read(stuffer, out->size));
- void *ptr = stuffer->blob.data + stuffer->read_cursor - out->size;
- ENSURE_POSIX(S2N_MEM_IS_READABLE(ptr, out->size), S2N_ERR_NULL);
+ void *ptr = (stuffer->blob.data) ? (stuffer->blob.data + stuffer->read_cursor - out->size) : NULL;
+ POSIX_ENSURE(S2N_MEM_IS_READABLE(ptr, out->size), S2N_ERR_NULL);
- memcpy_check(out->data, ptr, out->size);
- memset_check(ptr, 0, out->size);
+ POSIX_CHECKED_MEMCPY(out->data, ptr, out->size);
+ POSIX_CHECKED_MEMSET(ptr, 0, out->size);
return S2N_SUCCESS;
}
int s2n_stuffer_read_bytes(struct s2n_stuffer *stuffer, uint8_t * data, uint32_t size)
{
- notnull_check(data);
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- GUARD(s2n_stuffer_skip_read(stuffer, size));
- notnull_check(stuffer->blob.data);
+ POSIX_ENSURE_REF(data);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_GUARD(s2n_stuffer_skip_read(stuffer, size));
+ POSIX_ENSURE_REF(stuffer->blob.data);
void *ptr = stuffer->blob.data + stuffer->read_cursor - size;
- memcpy_check(data, ptr, size);
+ POSIX_CHECKED_MEMCPY(data, ptr, size);
return S2N_SUCCESS;
}
int s2n_stuffer_erase_and_read_bytes(struct s2n_stuffer *stuffer, uint8_t * data, uint32_t size)
{
- GUARD(s2n_stuffer_skip_read(stuffer, size));
- notnull_check(stuffer->blob.data);
+ POSIX_GUARD(s2n_stuffer_skip_read(stuffer, size));
+ POSIX_ENSURE_REF(stuffer->blob.data);
void *ptr = stuffer->blob.data + stuffer->read_cursor - size;
- memcpy_check(data, ptr, size);
- memset_check(ptr, 0, size);
+ POSIX_CHECKED_MEMCPY(data, ptr, size);
+ POSIX_CHECKED_MEMSET(ptr, 0, size);
return S2N_SUCCESS;
}
int s2n_stuffer_skip_write(struct s2n_stuffer *stuffer, const uint32_t n)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- GUARD(s2n_stuffer_reserve_space(stuffer, n));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_GUARD(s2n_stuffer_reserve_space(stuffer, n));
stuffer->write_cursor += n;
stuffer->high_water_mark = MAX(stuffer->write_cursor, stuffer->high_water_mark);
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
void *s2n_stuffer_raw_write(struct s2n_stuffer *stuffer, const uint32_t data_len)
{
- GUARD_PTR(s2n_stuffer_skip_write(stuffer, data_len));
+ PTR_GUARD_POSIX(s2n_stuffer_skip_write(stuffer, data_len));
stuffer->tainted = 1;
- return stuffer->blob.data + stuffer->write_cursor - data_len;
+ return (stuffer->blob.data) ? (stuffer->blob.data + stuffer->write_cursor - data_len) : NULL;
}
int s2n_stuffer_write(struct s2n_stuffer *stuffer, const struct s2n_blob *in)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- PRECONDITION_POSIX(s2n_blob_validate(in));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_blob_validate(in));
return s2n_stuffer_write_bytes(stuffer, in->data, in->size);
}
int s2n_stuffer_write_bytes(struct s2n_stuffer *stuffer, const uint8_t * data, const uint32_t size)
{
- ENSURE_POSIX(S2N_MEM_IS_READABLE(data, size), S2N_ERR_SAFETY);
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- GUARD(s2n_stuffer_skip_write(stuffer, size));
+ POSIX_ENSURE(S2N_MEM_IS_READABLE(data, size), S2N_ERR_SAFETY);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_GUARD(s2n_stuffer_skip_write(stuffer, size));
void *ptr = stuffer->blob.data + stuffer->write_cursor - size;
- ENSURE_POSIX(S2N_MEM_IS_READABLE(ptr, size), S2N_ERR_NULL);
+ POSIX_ENSURE(S2N_MEM_IS_READABLE(ptr, size), S2N_ERR_NULL);
if (ptr == data) {
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
- memcpy_check(ptr, data, size);
+ POSIX_CHECKED_MEMCPY(ptr, data, size);
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_writev_bytes(struct s2n_stuffer *stuffer, const struct iovec* iov, size_t iov_count, uint32_t offs, uint32_t size)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- notnull_check(iov);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE_REF(iov);
void *ptr = s2n_stuffer_raw_write(stuffer, size);
- ENSURE_POSIX(S2N_MEM_IS_READABLE(ptr, size), S2N_ERR_NULL);
+ POSIX_ENSURE(S2N_MEM_IS_READABLE(ptr, size), S2N_ERR_NULL);
size_t size_left = size, to_skip = offs;
- for (int i = 0; i < iov_count; i++) {
+ for (size_t i = 0; i < iov_count; i++) {
if (to_skip >= iov[i].iov_len) {
to_skip -= iov[i].iov_len;
continue;
}
size_t iov_len_op = iov[i].iov_len - to_skip;
- ENSURE_POSIX(iov_len_op <= UINT32_MAX, S2N_FAILURE);
+ POSIX_ENSURE(iov_len_op <= UINT32_MAX, S2N_FAILURE);
uint32_t iov_len = (uint32_t)iov_len_op;
uint32_t iov_size_to_take = MIN(size_left, iov_len);
- notnull_check(iov[i].iov_base);
- ENSURE_POSIX(to_skip < iov[i].iov_len, S2N_FAILURE);
- memcpy_check(ptr, ((uint8_t*)(iov[i].iov_base)) + to_skip, iov_size_to_take);
+ POSIX_ENSURE_REF(iov[i].iov_base);
+ POSIX_ENSURE(to_skip < iov[i].iov_len, S2N_FAILURE);
+ POSIX_CHECKED_MEMCPY(ptr, ((uint8_t*)(iov[i].iov_base)) + to_skip, iov_size_to_take);
size_left -= iov_size_to_take;
if (size_left == 0) {
break;
@@ -351,29 +358,29 @@ int s2n_stuffer_writev_bytes(struct s2n_stuffer *stuffer, const struct iovec* io
static int s2n_stuffer_copy_impl(struct s2n_stuffer *from, struct s2n_stuffer *to, const uint32_t len)
{
- GUARD(s2n_stuffer_skip_read(from, len));
- GUARD(s2n_stuffer_skip_write(to, len));
+ POSIX_GUARD(s2n_stuffer_skip_read(from, len));
+ POSIX_GUARD(s2n_stuffer_skip_write(to, len));
- uint8_t *from_ptr = from->blob.data + from->read_cursor - len;
- uint8_t *to_ptr = to->blob.data + to->write_cursor - len;
+ uint8_t *from_ptr = (from->blob.data) ? (from->blob.data + from->read_cursor - len) : NULL;
+ uint8_t *to_ptr = (to->blob.data) ? (to->blob.data + to->write_cursor - len) : NULL;
- memcpy_check(to_ptr, from_ptr, len);
+ POSIX_CHECKED_MEMCPY(to_ptr, from_ptr, len);
return S2N_SUCCESS;
}
int s2n_stuffer_reserve_space(struct s2n_stuffer *stuffer, uint32_t n)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
if (s2n_stuffer_space_remaining(stuffer) < n) {
- S2N_ERROR_IF(!stuffer->growable, S2N_ERR_STUFFER_IS_FULL);
+ POSIX_ENSURE(stuffer->growable, S2N_ERR_STUFFER_IS_FULL);
/* Always grow a stuffer by at least 1k */
const uint32_t growth = MAX(n - s2n_stuffer_space_remaining(stuffer), S2N_MIN_STUFFER_GROWTH_IN_BYTES);
uint32_t new_size = 0;
- GUARD(s2n_add_overflow(stuffer->blob.size, growth, &new_size));
- GUARD(s2n_stuffer_resize(stuffer, new_size));
+ POSIX_GUARD(s2n_add_overflow(stuffer->blob.size, growth, &new_size));
+ POSIX_GUARD(s2n_stuffer_resize(stuffer, new_size));
}
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
@@ -396,16 +403,16 @@ int s2n_stuffer_copy(struct s2n_stuffer *from, struct s2n_stuffer *to, const uin
int s2n_stuffer_extract_blob(struct s2n_stuffer *stuffer, struct s2n_blob *out)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- notnull_check(out);
- GUARD(s2n_realloc(out , s2n_stuffer_data_available(stuffer)));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE_REF(out);
+ POSIX_GUARD(s2n_realloc(out , s2n_stuffer_data_available(stuffer)));
if (s2n_stuffer_data_available(stuffer) > 0) {
- memcpy_check(out->data,
+ POSIX_CHECKED_MEMCPY(out->data,
stuffer->blob.data + stuffer->read_cursor,
s2n_stuffer_data_available(stuffer));
}
- POSTCONDITION_POSIX(s2n_blob_validate(out));
+ POSIX_POSTCONDITION(s2n_blob_validate(out));
return S2N_SUCCESS;
}
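
    Most of the hunks above are a mechanical migration to the namespaced safety macros: GUARD becomes POSIX_GUARD, notnull_check becomes POSIX_ENSURE_REF, memcpy_check becomes POSIX_CHECKED_MEMCPY, and PRECONDITION_POSIX/POSTCONDITION_POSIX become POSIX_PRECONDITION/POSIX_POSTCONDITION. The standalone sketch below is illustration only, not the s2n definitions (those live in utils/s2n_safety.h); the DEMO_* names are invented stand-ins that show the early-return control flow this macro family provides.

    /* Illustration only: simplified stand-ins for the namespaced safety macros
     * used throughout this patch. The real definitions are more elaborate
     * (error codes, debug info); this only shows the early-return behavior. */
    #include <stdio.h>
    #include <string.h>

    #define DEMO_SUCCESS  0
    #define DEMO_FAILURE -1

    #define DEMO_GUARD(x)        do { if ((x) < 0) { return DEMO_FAILURE; } } while (0)
    #define DEMO_ENSURE(cond)    do { if (!(cond)) { return DEMO_FAILURE; } } while (0)
    #define DEMO_ENSURE_REF(ptr) DEMO_ENSURE((ptr) != NULL)

    static int demo_copy(char *dst, size_t dst_size, const char *src)
    {
        DEMO_ENSURE_REF(dst);                /* roughly POSIX_ENSURE_REF */
        DEMO_ENSURE_REF(src);
        DEMO_ENSURE(strlen(src) < dst_size); /* roughly POSIX_ENSURE(..., S2N_ERR_SAFETY) */
        memcpy(dst, src, strlen(src) + 1);   /* roughly POSIX_CHECKED_MEMCPY */
        return DEMO_SUCCESS;
    }

    int main(void)
    {
        char buf[16] = { 0 };
        DEMO_GUARD(demo_copy(buf, sizeof(buf), "hello"));  /* roughly POSIX_GUARD */
        puts(buf);
        return DEMO_SUCCESS;
    }
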
diff --git a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.h b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.h
index c67e1c4a73..99233e2cde 100644
--- a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.h
+++ b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer.h
@@ -59,6 +59,7 @@ struct s2n_stuffer {
#define s2n_stuffer_data_available( s ) ((s)->write_cursor - (s)->read_cursor)
#define s2n_stuffer_space_remaining( s ) ((s)->blob.size - (s)->write_cursor)
#define s2n_stuffer_is_wiped( s ) ((s)->high_water_mark == 0)
+#define s2n_stuffer_is_freed( s ) ((s)->blob.data == NULL)
/* Check basic validity constraints on the stuffer: e.g. that cursors point within the blob */
extern S2N_RESULT s2n_stuffer_validate(const struct s2n_stuffer* stuffer);
@@ -123,6 +124,7 @@ struct s2n_stuffer_reservation {
};
/* Check basic validity constraints on the s2n_stuffer_reservation: e.g. stuffer validity. */
extern S2N_RESULT s2n_stuffer_reservation_validate(const struct s2n_stuffer_reservation* reservation);
+int s2n_stuffer_reserve_uint8(struct s2n_stuffer *stuffer, struct s2n_stuffer_reservation *reservation);
extern int s2n_stuffer_reserve_uint16(struct s2n_stuffer *stuffer, struct s2n_stuffer_reservation *reservation);
extern int s2n_stuffer_reserve_uint24(struct s2n_stuffer *stuffer, struct s2n_stuffer_reservation *reservation);
extern int s2n_stuffer_write_vector_size(struct s2n_stuffer_reservation *reservation);
@@ -150,6 +152,7 @@ extern int s2n_stuffer_skip_to_char(struct s2n_stuffer *stuffer, char target);
extern int s2n_stuffer_skip_expected_char(struct s2n_stuffer *stuffer, const char expected, const uint32_t min, const uint32_t max, uint32_t *skipped);
extern int s2n_stuffer_skip_read_until(struct s2n_stuffer *stuffer, const char* target);
extern int s2n_stuffer_alloc_ro_from_string(struct s2n_stuffer *stuffer, const char *str);
+extern int s2n_stuffer_init_ro_from_string(struct s2n_stuffer *stuffer, uint8_t *data, uint32_t length);
/* Read a private key from a PEM encoded stuffer to an ASN1/DER encoded one */
extern int s2n_stuffer_private_key_from_pem(struct s2n_stuffer *pem, struct s2n_stuffer *asn1);
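
    The header gains declarations for s2n_stuffer_reserve_uint8() and s2n_stuffer_init_ro_from_string(), both implemented later in this patch. The reservation API follows a reserve-then-backfill pattern: skip space for a length prefix, write the variable-length body, then fill the prefix in via s2n_stuffer_write_vector_size(). A minimal standalone sketch of that pattern on a plain byte buffer (illustration only, not the s2n API):

    /* Illustration only: reserve a one-byte length slot, write the body,
     * then backfill the slot with the body's length. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint8_t out[64] = { 0 };
        size_t cursor = 0;

        size_t length_slot = cursor;      /* "reserve" a uint8 length prefix */
        cursor += 1;

        const char *body = "h2";          /* variable-length payload */
        memcpy(out + cursor, body, strlen(body));
        cursor += strlen(body);

        /* "write_vector_size": backfill the reserved slot with the body length */
        out[length_slot] = (uint8_t)(cursor - length_slot - 1);

        printf("encoded %zu bytes, length prefix = %u\n", cursor, (unsigned)out[length_slot]);
        return 0;
    }
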
diff --git a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_base64.c b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_base64.c
index bb156ac5f5..526200d234 100644
--- a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_base64.c
+++ b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_base64.c
@@ -74,8 +74,8 @@ bool s2n_is_base64_char(unsigned char c)
*/
int s2n_stuffer_read_base64(struct s2n_stuffer *stuffer, struct s2n_stuffer *out)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- PRECONDITION_POSIX(s2n_stuffer_validate(out));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(out));
int bytes_this_round = 3;
s2n_stack_blob(o, 4, 4);
@@ -84,7 +84,7 @@ int s2n_stuffer_read_base64(struct s2n_stuffer *stuffer, struct s2n_stuffer *out
break;
}
- GUARD(s2n_stuffer_read(stuffer, &o));
+ POSIX_GUARD(s2n_stuffer_read(stuffer, &o));
uint8_t value1 = b64_inverse[o.data[0]];
uint8_t value2 = b64_inverse[o.data[1]];
@@ -95,32 +95,32 @@ int s2n_stuffer_read_base64(struct s2n_stuffer *stuffer, struct s2n_stuffer *out
if (value1 == 255) {
/* Undo the read */
stuffer->read_cursor -= o.size;
- S2N_ERROR(S2N_ERR_INVALID_BASE64);
+ POSIX_BAIL(S2N_ERR_INVALID_BASE64);
}
/* The first two characters can never be '=' and in general
* everything has to be a valid character.
*/
- S2N_ERROR_IF(value1 == 64 || value2 == 64 || value2 == 255 || value3 == 255 || value4 == 255, S2N_ERR_INVALID_BASE64);
+ POSIX_ENSURE(!(value1 == 64 || value2 == 64 || value2 == 255 || value3 == 255 || value4 == 255), S2N_ERR_INVALID_BASE64);
if (o.data[2] == '=') {
/* If there is only one output byte, then the second value
* should have none of its bottom four bits set.
*/
- S2N_ERROR_IF(o.data[3] != '=' || value2 & 0x0f, S2N_ERR_INVALID_BASE64);
+ POSIX_ENSURE(!(o.data[3] != '=' || value2 & 0x0f), S2N_ERR_INVALID_BASE64);
bytes_this_round = 1;
value3 = 0;
value4 = 0;
} else if (o.data[3] == '=') {
/* The last two bits of the final value should be unset */
- S2N_ERROR_IF(value3 & 0x03, S2N_ERR_INVALID_BASE64);
+ POSIX_ENSURE(!(value3 & 0x03), S2N_ERR_INVALID_BASE64);
bytes_this_round = 2;
value4 = 0;
}
/* Advance by bytes_this_round, and then fill in the data */
- GUARD(s2n_stuffer_skip_write(out, bytes_this_round));
+ POSIX_GUARD(s2n_stuffer_skip_write(out, bytes_this_round));
uint8_t *ptr = out->blob.data + out->write_cursor - bytes_this_round;
/* value1 maps to the first 6 bits of the first data byte */
@@ -147,13 +147,13 @@ int s2n_stuffer_read_base64(struct s2n_stuffer *stuffer, struct s2n_stuffer *out
int s2n_stuffer_write_base64(struct s2n_stuffer *stuffer, struct s2n_stuffer *in)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- PRECONDITION_POSIX(s2n_stuffer_validate(in));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(in));
s2n_stack_blob(o, 4, 4);
s2n_stack_blob(i, 3, 3);
while (s2n_stuffer_data_available(in) > 2) {
- GUARD(s2n_stuffer_read(in, &i));
+ POSIX_GUARD(s2n_stuffer_read(in, &i));
/* Take the top 6-bits of the first data byte */
o.data[0] = b64[(i.data[0] >> 2) & 0x3f];
@@ -172,13 +172,13 @@ int s2n_stuffer_write_base64(struct s2n_stuffer *stuffer, struct s2n_stuffer *in
*/
o.data[3] = b64[i.data[2] & 0x3f];
- GUARD(s2n_stuffer_write(stuffer, &o));
+ POSIX_GUARD(s2n_stuffer_write(stuffer, &o));
}
if (s2n_stuffer_data_available(in)) {
/* Read just one byte */
i.size = 1;
- GUARD(s2n_stuffer_read(in, &i));
+ POSIX_GUARD(s2n_stuffer_read(in, &i));
uint8_t c = i.data[0];
        /* We have at least one data byte left to encode, encode
@@ -196,13 +196,13 @@ int s2n_stuffer_write_base64(struct s2n_stuffer *stuffer, struct s2n_stuffer *in
o.data[2] = '=';
} else {
/* Read the last byte */
- GUARD(s2n_stuffer_read(in, &i));
+ POSIX_GUARD(s2n_stuffer_read(in, &i));
o.data[1] = b64[((c << 4) & 0x30) | ((i.data[0] >> 4) & 0x0f)];
o.data[2] = b64[((i.data[0] << 2) & 0x3c)];
}
- GUARD(s2n_stuffer_write(stuffer, &o));
+ POSIX_GUARD(s2n_stuffer_write(stuffer, &o));
}
return S2N_SUCCESS;
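
    The base64 hunks above only swap the guard macros; the bit arithmetic is unchanged. For reference, a standalone sketch of how the encoder splits three input bytes into four 6-bit alphabet indexes, mirroring the shifts and masks used above (the alphabet table lookup and '=' padding for short trailing groups are omitted):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint8_t in[3] = { 'M', 'a', 'n' };   /* classic RFC 4648 example */

        int idx0 = (in[0] >> 2) & 0x3f;                        /* top 6 bits of byte 0 */
        int idx1 = ((in[0] << 4) & 0x30) | ((in[1] >> 4) & 0x0f);
        int idx2 = ((in[1] << 2) & 0x3c) | ((in[2] >> 6) & 0x03);
        int idx3 = in[2] & 0x3f;                               /* bottom 6 bits of byte 2 */

        /* "Man" yields indexes 19, 22, 5, 46 -> "TWFu" with the standard alphabet */
        printf("%d %d %d %d\n", idx0, idx1, idx2, idx3);
        return 0;
    }
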
diff --git a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_file.c b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_file.c
index b86e725604..4deb666250 100644
--- a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_file.c
+++ b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_file.c
@@ -28,43 +28,44 @@
int s2n_stuffer_recv_from_fd(struct s2n_stuffer *stuffer, const int rfd, const uint32_t len, uint32_t *bytes_written)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+
/* Make sure we have enough space to write */
- GUARD(s2n_stuffer_skip_write(stuffer, len));
+ POSIX_GUARD(s2n_stuffer_skip_write(stuffer, len));
/* "undo" the skip write */
stuffer->write_cursor -= len;
ssize_t r = 0;
do {
+ POSIX_ENSURE(stuffer->blob.data && (r >= 0 || errno == EINTR), S2N_ERR_READ);
r = read(rfd, stuffer->blob.data + stuffer->write_cursor, len);
- S2N_ERROR_IF(r < 0 && errno != EINTR, S2N_ERR_READ);
} while (r < 0);
/* Record just how many bytes we have written */
- S2N_ERROR_IF(r > UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
- GUARD(s2n_stuffer_skip_write(stuffer, (uint32_t)r));
+ POSIX_ENSURE(r <= UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_GUARD(s2n_stuffer_skip_write(stuffer, (uint32_t)r));
if (bytes_written != NULL) *bytes_written = r;
return S2N_SUCCESS;
}
int s2n_stuffer_send_to_fd(struct s2n_stuffer *stuffer, const int wfd, const uint32_t len, uint32_t *bytes_sent)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
/* Make sure we even have the data */
- GUARD(s2n_stuffer_skip_read(stuffer, len));
+ POSIX_GUARD(s2n_stuffer_skip_read(stuffer, len));
/* "undo" the skip read */
stuffer->read_cursor -= len;
ssize_t w = 0;
do {
+ POSIX_ENSURE(stuffer->blob.data && (w >= 0 || errno == EINTR), S2N_ERR_WRITE);
w = write(wfd, stuffer->blob.data + stuffer->read_cursor, len);
- S2N_ERROR_IF(w < 0 && errno != EINTR, S2N_ERR_WRITE);
} while (w < 0);
- S2N_ERROR_IF(w > UINT32_MAX - stuffer->read_cursor, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(w <= UINT32_MAX - stuffer->read_cursor, S2N_ERR_INTEGER_OVERFLOW);
stuffer->read_cursor += w;
if (bytes_sent != NULL) *bytes_sent = w;
return S2N_SUCCESS;
@@ -72,36 +73,36 @@ int s2n_stuffer_send_to_fd(struct s2n_stuffer *stuffer, const int wfd, const uin
int s2n_stuffer_alloc_ro_from_fd(struct s2n_stuffer *stuffer, int rfd)
{
- ENSURE_POSIX_MUT(stuffer);
+ POSIX_ENSURE_MUT(stuffer);
struct stat st = {0};
- ENSURE_POSIX(fstat(rfd, &st) >= 0, S2N_ERR_FSTAT);
+ POSIX_ENSURE(fstat(rfd, &st) >= 0, S2N_ERR_FSTAT);
- ENSURE_POSIX(st.st_size > 0, S2N_FAILURE);
- ENSURE_POSIX(st.st_size <= UINT32_MAX, S2N_FAILURE);
+ POSIX_ENSURE(st.st_size > 0, S2N_FAILURE);
+ POSIX_ENSURE(st.st_size <= UINT32_MAX, S2N_FAILURE);
uint8_t *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, rfd, 0);
- ENSURE_POSIX(map != MAP_FAILED, S2N_ERR_MMAP);
+ POSIX_ENSURE(map != MAP_FAILED, S2N_ERR_MMAP);
struct s2n_blob b = {0};
- ENSURE_POSIX(s2n_blob_init(&b, map, (uint32_t)st.st_size), S2N_FAILURE);
+ POSIX_ENSURE(s2n_blob_init(&b, map, (uint32_t)st.st_size), S2N_FAILURE);
return s2n_stuffer_init(stuffer, &b);
}
int s2n_stuffer_alloc_ro_from_file(struct s2n_stuffer *stuffer, const char *file)
{
- ENSURE_POSIX_MUT(stuffer);
- notnull_check(file);
+ POSIX_ENSURE_MUT(stuffer);
+ POSIX_ENSURE_REF(file);
int fd;
do {
fd = open(file, O_RDONLY);
- ENSURE_POSIX(fd >= 0 || errno == EINTR, S2N_ERR_OPEN);
+ POSIX_ENSURE(fd >= 0 || errno == EINTR, S2N_ERR_OPEN);
} while (fd < 0);
int r = s2n_stuffer_alloc_ro_from_fd(stuffer, fd);
- GUARD(close(fd));
+ POSIX_GUARD(close(fd));
return r;
}
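
    Besides the macro rename, these hunks move the error check to the top of the read/write retry loops so the blob pointer is validated before the first syscall. The underlying idiom is retry-on-EINTR: a read() or write() interrupted by a signal is simply retried, while any other failure is treated as a real error. A standalone sketch of just that idiom, without the stuffer bookkeeping:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    static ssize_t read_retry_eintr(int fd, void *buf, size_t len)
    {
        ssize_t r;
        do {
            r = read(fd, buf, len);
        } while (r < 0 && errno == EINTR);
        return r; /* >= 0 on success, < 0 with errno set on a real failure */
    }

    int main(void)
    {
        char buf[32];
        ssize_t n = read_retry_eintr(STDIN_FILENO, buf, sizeof(buf));
        if (n < 0) {
            perror("read");
            return 1;
        }
        printf("read %zd bytes\n", n);
        return 0;
    }
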
diff --git a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_network_order.c b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_network_order.c
index eb75409b6a..57cd149255 100644
--- a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_network_order.c
+++ b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_network_order.c
@@ -23,52 +23,57 @@
/* Writes length bytes of input to stuffer, in network order, starting from the smallest byte of input. */
int s2n_stuffer_write_network_order(struct s2n_stuffer *stuffer, const uint64_t input, const uint8_t length)
{
- ENSURE_POSIX(length <= sizeof(input), S2N_ERR_SAFETY);
- GUARD(s2n_stuffer_skip_write(stuffer, length));
- uint8_t *data = stuffer->blob.data + stuffer->write_cursor - length;
+ POSIX_ENSURE(length <= sizeof(input), S2N_ERR_SAFETY);
+ POSIX_GUARD(s2n_stuffer_skip_write(stuffer, length));
+ uint8_t *data = (stuffer->blob.data) ? (stuffer->blob.data + stuffer->write_cursor - length) : NULL;
for (int i = 0; i < length; i++) {
S2N_INVARIANT(i <= length);
uint8_t shift = (length - i - 1) * CHAR_BIT;
data[i] = (input >> (shift)) & UINT8_MAX;
}
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_reserve(struct s2n_stuffer *stuffer, struct s2n_stuffer_reservation *reservation, const uint8_t length)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- notnull_check(reservation);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE_REF(reservation);
*reservation = (struct s2n_stuffer_reservation) {.stuffer = stuffer, .write_cursor = stuffer->write_cursor, .length = length};
- GUARD(s2n_stuffer_skip_write(stuffer, reservation->length));
- memset_check(stuffer->blob.data + reservation->write_cursor, S2N_WIPE_PATTERN, reservation->length);
- POSTCONDITION_POSIX(s2n_stuffer_reservation_validate(reservation));
+ POSIX_GUARD(s2n_stuffer_skip_write(stuffer, reservation->length));
+ POSIX_CHECKED_MEMSET(stuffer->blob.data + reservation->write_cursor, S2N_WIPE_PATTERN, reservation->length);
+ POSIX_POSTCONDITION(s2n_stuffer_reservation_validate(reservation));
return S2N_SUCCESS;
}
int s2n_stuffer_read_uint8(struct s2n_stuffer *stuffer, uint8_t * u)
{
- GUARD(s2n_stuffer_read_bytes(stuffer, u, sizeof(uint8_t)));
+ POSIX_GUARD(s2n_stuffer_read_bytes(stuffer, u, sizeof(uint8_t)));
return S2N_SUCCESS;
}
int s2n_stuffer_write_uint8(struct s2n_stuffer *stuffer, const uint8_t u)
{
- GUARD(s2n_stuffer_write_bytes(stuffer, &u, sizeof(u)));
+ POSIX_GUARD(s2n_stuffer_write_bytes(stuffer, &u, sizeof(u)));
return S2N_SUCCESS;
}
+int s2n_stuffer_reserve_uint8(struct s2n_stuffer *stuffer, struct s2n_stuffer_reservation *reservation)
+{
+ return s2n_stuffer_reserve(stuffer, reservation, sizeof(uint8_t));
+}
+
int s2n_stuffer_read_uint16(struct s2n_stuffer *stuffer, uint16_t * u)
{
- notnull_check(u);
+ POSIX_ENSURE_REF(u);
uint8_t data[sizeof(uint16_t)];
- GUARD(s2n_stuffer_read_bytes(stuffer, data, sizeof(data)));
+ POSIX_GUARD(s2n_stuffer_read_bytes(stuffer, data, sizeof(data)));
*u = data[0] << 8;
*u |= data[1];
@@ -88,10 +93,10 @@ int s2n_stuffer_reserve_uint16(struct s2n_stuffer *stuffer, struct s2n_stuffer_r
int s2n_stuffer_read_uint24(struct s2n_stuffer *stuffer, uint32_t * u)
{
- notnull_check(u);
+ POSIX_ENSURE_REF(u);
uint8_t data[SIZEOF_UINT24];
- GUARD(s2n_stuffer_read_bytes(stuffer, data, sizeof(data)));
+ POSIX_GUARD(s2n_stuffer_read_bytes(stuffer, data, sizeof(data)));
*u = data[0] << 16;
*u |= data[1] << 8;
@@ -112,10 +117,10 @@ int s2n_stuffer_reserve_uint24(struct s2n_stuffer *stuffer, struct s2n_stuffer_r
int s2n_stuffer_read_uint32(struct s2n_stuffer *stuffer, uint32_t * u)
{
- notnull_check(u);
+ POSIX_ENSURE_REF(u);
uint8_t data[sizeof(uint32_t)];
- GUARD(s2n_stuffer_read_bytes(stuffer, data, sizeof(data)));
+ POSIX_GUARD(s2n_stuffer_read_bytes(stuffer, data, sizeof(data)));
*u = ((uint32_t) data[0]) << 24;
*u |= data[1] << 16;
@@ -132,10 +137,10 @@ int s2n_stuffer_write_uint32(struct s2n_stuffer *stuffer, const uint32_t u)
int s2n_stuffer_read_uint64(struct s2n_stuffer *stuffer, uint64_t * u)
{
- notnull_check(u);
+ POSIX_ENSURE_REF(u);
uint8_t data[sizeof(uint64_t)];
- GUARD(s2n_stuffer_read_bytes(stuffer, data, sizeof(data)));
+ POSIX_GUARD(s2n_stuffer_read_bytes(stuffer, data, sizeof(data)));
*u = ((uint64_t) data[0]) << 56;
*u |= ((uint64_t) data[1]) << 48;
@@ -157,11 +162,11 @@ int s2n_stuffer_write_uint64(struct s2n_stuffer *stuffer, const uint64_t u)
static int length_matches_value_check(uint32_t value, uint8_t length)
{
/* Value is represented as a uint32_t, so shouldn't be assumed larger */
- S2N_ERROR_IF(length > sizeof(uint32_t), S2N_ERR_SIZE_MISMATCH);
+ POSIX_ENSURE(length <= sizeof(uint32_t), S2N_ERR_SIZE_MISMATCH);
if (length < sizeof(uint32_t)) {
/* Value should be less than the maximum for its length */
- S2N_ERROR_IF(value >= (0x01 << (length * 8)), S2N_ERR_SIZE_MISMATCH);
+ POSIX_ENSURE(value < (0x01 << (length * 8)), S2N_ERR_SIZE_MISMATCH);
}
return S2N_SUCCESS;
@@ -170,17 +175,17 @@ static int length_matches_value_check(uint32_t value, uint8_t length)
static int s2n_stuffer_write_reservation_impl(struct s2n_stuffer_reservation* reservation, const uint32_t u)
{
reservation->stuffer->write_cursor = reservation->write_cursor;
- PRECONDITION_POSIX(s2n_stuffer_validate(reservation->stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(reservation->stuffer));
- GUARD(length_matches_value_check(u, reservation->length));
- GUARD(s2n_stuffer_write_network_order(reservation->stuffer, u, reservation->length));
- POSTCONDITION_POSIX(s2n_stuffer_validate(reservation->stuffer));
+ POSIX_GUARD(length_matches_value_check(u, reservation->length));
+ POSIX_GUARD(s2n_stuffer_write_network_order(reservation->stuffer, u, reservation->length));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(reservation->stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_write_reservation(struct s2n_stuffer_reservation* reservation, const uint32_t u)
{
- PRECONDITION_POSIX(s2n_stuffer_reservation_validate(reservation));
+ POSIX_PRECONDITION(s2n_stuffer_reservation_validate(reservation));
uint32_t old_write_cursor = reservation->stuffer->write_cursor;
int result = s2n_stuffer_write_reservation_impl(reservation, u);
reservation->stuffer->write_cursor = old_write_cursor;
@@ -189,9 +194,9 @@ int s2n_stuffer_write_reservation(struct s2n_stuffer_reservation* reservation, c
int s2n_stuffer_write_vector_size(struct s2n_stuffer_reservation* reservation)
{
- PRECONDITION_POSIX(s2n_stuffer_reservation_validate(reservation));
+ POSIX_PRECONDITION(s2n_stuffer_reservation_validate(reservation));
uint32_t size = 0;
- GUARD(s2n_sub_overflow(reservation->stuffer->write_cursor, reservation->write_cursor, &size));
- GUARD(s2n_sub_overflow(size, reservation->length, &size));
+ POSIX_GUARD(s2n_sub_overflow(reservation->stuffer->write_cursor, reservation->write_cursor, &size));
+ POSIX_GUARD(s2n_sub_overflow(size, reservation->length, &size));
return s2n_stuffer_write_reservation(reservation, size);
}
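
    s2n_stuffer_write_network_order() is the common serializer behind the write_uint16/24/32/64 helpers and the new s2n_stuffer_reserve_uint8(). A standalone sketch of the byte layout it produces, here for a 3-byte (uint24-style) field written most-significant byte first:

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    int main(void)
    {
        uint64_t input = 0x0A0B0C;     /* value to serialize */
        uint8_t  length = 3;           /* serialize as 3 bytes, network order */
        uint8_t  data[8] = { 0 };

        for (int i = 0; i < length; i++) {
            uint8_t shift = (length - i - 1) * CHAR_BIT;
            data[i] = (input >> shift) & UINT8_MAX;
        }

        /* Prints: 0a 0b 0c */
        for (int i = 0; i < length; i++) {
            printf("%02x ", (unsigned)data[i]);
        }
        printf("\n");
        return 0;
    }
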
diff --git a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_pem.c b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_pem.c
index a0805980c2..6c18694179 100644
--- a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_pem.c
+++ b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_pem.c
@@ -35,25 +35,25 @@
static int s2n_stuffer_pem_read_encapsulation_line(struct s2n_stuffer *pem, const char* encap_marker, const char *keyword) {
/* Skip any number of Chars until a "-" is reached */
- GUARD(s2n_stuffer_skip_to_char(pem, S2N_PEM_DELIMTER_CHAR));
+ POSIX_GUARD(s2n_stuffer_skip_to_char(pem, S2N_PEM_DELIMTER_CHAR));
/* Ensure between 1 and 64 '-' chars at start of line */
- GUARD(s2n_stuffer_skip_expected_char(pem, S2N_PEM_DELIMTER_CHAR, S2N_PEM_DELIMITER_MIN_COUNT, S2N_PEM_DELIMITER_MAX_COUNT, NULL));
+ POSIX_GUARD(s2n_stuffer_skip_expected_char(pem, S2N_PEM_DELIMTER_CHAR, S2N_PEM_DELIMITER_MIN_COUNT, S2N_PEM_DELIMITER_MAX_COUNT, NULL));
/* Ensure next string in stuffer is "BEGIN " or "END " */
- GUARD(s2n_stuffer_read_expected_str(pem, encap_marker));
+ POSIX_GUARD(s2n_stuffer_read_expected_str(pem, encap_marker));
    /* Ensure next string in stuffer is the keyword (e.g. "CERTIFICATE", "PRIVATE KEY", etc) */
- GUARD(s2n_stuffer_read_expected_str(pem, keyword));
+ POSIX_GUARD(s2n_stuffer_read_expected_str(pem, keyword));
/* Ensure between 1 and 64 '-' chars at end of line */
- GUARD(s2n_stuffer_skip_expected_char(pem, S2N_PEM_DELIMTER_CHAR, S2N_PEM_DELIMITER_MIN_COUNT, S2N_PEM_DELIMITER_MAX_COUNT, NULL));
+ POSIX_GUARD(s2n_stuffer_skip_expected_char(pem, S2N_PEM_DELIMTER_CHAR, S2N_PEM_DELIMITER_MIN_COUNT, S2N_PEM_DELIMITER_MAX_COUNT, NULL));
/* Check for missing newline between dashes case: "-----END CERTIFICATE----------BEGIN CERTIFICATE-----" */
if (strncmp(encap_marker, S2N_PEM_END_TOKEN, strlen(S2N_PEM_END_TOKEN)) == 0
&& s2n_stuffer_peek_check_for_str(pem, S2N_PEM_BEGIN_TOKEN) == S2N_SUCCESS) {
/* Rewind stuffer by 1 byte before BEGIN, so that next read will find the dash before the BEGIN */
- GUARD(s2n_stuffer_rewind_read(pem, 1));
+ POSIX_GUARD(s2n_stuffer_rewind_read(pem, 1));
}
    /* Skip newlines and other whitespace that may be after the dashes */
@@ -74,11 +74,11 @@ static int s2n_stuffer_pem_read_contents(struct s2n_stuffer *pem, struct s2n_stu
{
s2n_stack_blob(base64__blob, 64, 64);
struct s2n_stuffer base64_stuffer = {0};
- GUARD(s2n_stuffer_init(&base64_stuffer, &base64__blob));
+ POSIX_GUARD(s2n_stuffer_init(&base64_stuffer, &base64__blob));
while (1) {
/* We need a byte... */
- ENSURE_POSIX(s2n_stuffer_data_available(pem) >= 1, S2N_ERR_STUFFER_OUT_OF_DATA);
+ POSIX_ENSURE(s2n_stuffer_data_available(pem) >= 1, S2N_ERR_STUFFER_OUT_OF_DATA);
/* Peek to see if the next char is a dash, meaning end of pem_contents */
uint8_t c = pem->blob.data[pem->read_cursor];
@@ -95,39 +95,39 @@ static int s2n_stuffer_pem_read_contents(struct s2n_stuffer *pem, struct s2n_stu
/* Flush base64_stuffer to asn1 stuffer if we're out of space, and reset base64_stuffer read/write pointers */
if (s2n_stuffer_space_remaining(&base64_stuffer) == 0) {
- GUARD(s2n_stuffer_read_base64(&base64_stuffer, asn1));
- GUARD(s2n_stuffer_rewrite(&base64_stuffer));
+ POSIX_GUARD(s2n_stuffer_read_base64(&base64_stuffer, asn1));
+ POSIX_GUARD(s2n_stuffer_rewrite(&base64_stuffer));
}
/* Copy next char to base64_stuffer */
- GUARD(s2n_stuffer_write_bytes(&base64_stuffer, (uint8_t *) &c, 1));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&base64_stuffer, (uint8_t *) &c, 1));
};
/* Flush any remaining bytes to asn1 */
- GUARD(s2n_stuffer_read_base64(&base64_stuffer, asn1));
+ POSIX_GUARD(s2n_stuffer_read_base64(&base64_stuffer, asn1));
return S2N_SUCCESS;
}
static int s2n_stuffer_data_from_pem(struct s2n_stuffer *pem, struct s2n_stuffer *asn1, const char *keyword)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(pem));
- PRECONDITION_POSIX(s2n_stuffer_validate(asn1));
- notnull_check(keyword);
+ POSIX_PRECONDITION(s2n_stuffer_validate(pem));
+ POSIX_PRECONDITION(s2n_stuffer_validate(asn1));
+ POSIX_ENSURE_REF(keyword);
- GUARD(s2n_stuffer_pem_read_begin(pem, keyword));
- GUARD(s2n_stuffer_pem_read_contents(pem, asn1));
- GUARD(s2n_stuffer_pem_read_end(pem, keyword));
+ POSIX_GUARD(s2n_stuffer_pem_read_begin(pem, keyword));
+ POSIX_GUARD(s2n_stuffer_pem_read_contents(pem, asn1));
+ POSIX_GUARD(s2n_stuffer_pem_read_end(pem, keyword));
- POSTCONDITION_POSIX(s2n_stuffer_validate(pem));
- POSTCONDITION_POSIX(s2n_stuffer_validate(asn1));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(pem));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(asn1));
return S2N_SUCCESS;
}
int s2n_stuffer_private_key_from_pem(struct s2n_stuffer *pem, struct s2n_stuffer *asn1) {
- PRECONDITION_POSIX(s2n_stuffer_validate(pem));
- PRECONDITION_POSIX(s2n_stuffer_validate(asn1));
+ POSIX_PRECONDITION(s2n_stuffer_validate(pem));
+ POSIX_PRECONDITION(s2n_stuffer_validate(asn1));
int rc;
rc = s2n_stuffer_data_from_pem(pem, asn1, S2N_PEM_PKCS1_RSA_PRIVATE_KEY);
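
    The PEM reader above recognizes encapsulation lines of the form "-----BEGIN <keyword>-----" / "-----END <keyword>-----", tolerating between 1 and 64 dashes on each side. A minimal standalone check of the same framing with fixed dashes (illustration only; the real parser works on stuffers and is more permissive):

    #include <stdio.h>
    #include <string.h>

    static int looks_like_pem_begin(const char *line, const char *keyword)
    {
        const char *prefix = "-----BEGIN ";
        const char *suffix = "-----";
        if (strncmp(line, prefix, strlen(prefix)) != 0) {
            return 0;
        }
        line += strlen(prefix);
        if (strncmp(line, keyword, strlen(keyword)) != 0) {
            return 0;
        }
        line += strlen(keyword);
        return strncmp(line, suffix, strlen(suffix)) == 0;
    }

    int main(void)
    {
        /* Prints 1 then 0 */
        printf("%d\n", looks_like_pem_begin("-----BEGIN CERTIFICATE-----", "CERTIFICATE"));
        printf("%d\n", looks_like_pem_begin("-----BEGIN RSA PRIVATE KEY-----", "CERTIFICATE"));
        return 0;
    }
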
diff --git a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_text.c b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_text.c
index dc60ca5099..9372717c08 100644
--- a/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_text.c
+++ b/contrib/restricted/aws/s2n/stuffer/s2n_stuffer_text.c
@@ -26,106 +26,102 @@ int s2n_stuffer_peek_char(struct s2n_stuffer *s2n_stuffer, char *c)
if (r == S2N_SUCCESS) {
s2n_stuffer->read_cursor--;
}
- POSTCONDITION_POSIX(s2n_stuffer_validate(s2n_stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(s2n_stuffer));
return r;
}
/* Peeks in stuffer to see if expected string is present. */
int s2n_stuffer_peek_check_for_str(struct s2n_stuffer *s2n_stuffer, const char *expected)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(s2n_stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(s2n_stuffer));
uint32_t orig_read_pos = s2n_stuffer->read_cursor;
int rc = s2n_stuffer_read_expected_str(s2n_stuffer, expected);
s2n_stuffer->read_cursor = orig_read_pos;
- POSTCONDITION_POSIX(s2n_stuffer_validate(s2n_stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(s2n_stuffer));
return rc;
}
int s2n_stuffer_skip_whitespace(struct s2n_stuffer *s2n_stuffer, uint32_t *skipped)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(s2n_stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(s2n_stuffer));
uint32_t initial_read_cursor = s2n_stuffer->read_cursor;
- while (s2n_stuffer->read_cursor < s2n_stuffer->write_cursor) {
- switch (s2n_stuffer->blob.data[s2n_stuffer->read_cursor]) {
- case ' ': /* We don't use isspace, because it changes under locales */
- case '\t':
- case '\n':
- case '\r':
+ while (s2n_stuffer_data_available(s2n_stuffer)) {
+ uint8_t c = s2n_stuffer->blob.data[s2n_stuffer->read_cursor];
+ /* We don't use isspace, because it changes under locales. */
+ if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
s2n_stuffer->read_cursor += 1;
+ } else {
break;
- default:
- goto finished;
}
}
- finished:
if(skipped != NULL) *skipped = s2n_stuffer->read_cursor - initial_read_cursor;
- POSTCONDITION_POSIX(s2n_stuffer_validate(s2n_stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(s2n_stuffer));
return S2N_SUCCESS;
}
int s2n_stuffer_read_expected_str(struct s2n_stuffer *stuffer, const char *expected)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- notnull_check(expected);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE_REF(expected);
size_t expected_length = strlen(expected);
if (expected_length == 0) {
return S2N_SUCCESS;
}
- ENSURE_POSIX(s2n_stuffer_data_available(stuffer) >= expected_length, S2N_ERR_STUFFER_OUT_OF_DATA);
+ POSIX_ENSURE(s2n_stuffer_data_available(stuffer) >= expected_length, S2N_ERR_STUFFER_OUT_OF_DATA);
uint8_t *actual = stuffer->blob.data + stuffer->read_cursor;
- notnull_check(actual);
- ENSURE_POSIX(!memcmp(actual, expected, expected_length), S2N_ERR_STUFFER_NOT_FOUND);
+ POSIX_ENSURE_REF(actual);
+ POSIX_ENSURE(!memcmp(actual, expected, expected_length), S2N_ERR_STUFFER_NOT_FOUND);
stuffer->read_cursor += expected_length;
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
/* Read from stuffer until the target string is found, or until there is no more data. */
int s2n_stuffer_skip_read_until(struct s2n_stuffer *stuffer, const char *target)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- notnull_check(target);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE_REF(target);
const int len = strlen(target);
if (len == 0) {
return S2N_SUCCESS;
}
while (s2n_stuffer_data_available(stuffer) >= len) {
- GUARD(s2n_stuffer_skip_to_char(stuffer, target[0]));
- GUARD(s2n_stuffer_skip_read(stuffer, len));
+ POSIX_GUARD(s2n_stuffer_skip_to_char(stuffer, target[0]));
+ POSIX_GUARD(s2n_stuffer_skip_read(stuffer, len));
uint8_t *actual = stuffer->blob.data + stuffer->read_cursor - len;
- notnull_check(actual);
+ POSIX_ENSURE_REF(actual);
if (strncmp((char*)actual, target, len) == 0){
return S2N_SUCCESS;
} else {
/* If string doesn't match, rewind stuffer to 1 byte after last read */
- GUARD(s2n_stuffer_rewind_read(stuffer, len - 1));
+ POSIX_GUARD(s2n_stuffer_rewind_read(stuffer, len - 1));
continue;
}
}
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
/* Skips the stuffer until the first instance of the target character or until there is no more data. */
int s2n_stuffer_skip_to_char(struct s2n_stuffer *stuffer, const char target)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
while (s2n_stuffer_data_available(stuffer) > 0) {
if (stuffer->blob.data[stuffer->read_cursor] == target) {
break;
}
stuffer->read_cursor += 1;
}
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
/* Skips an expected character in the stuffer between min and max times */
int s2n_stuffer_skip_expected_char(struct s2n_stuffer *stuffer, const char expected, const uint32_t min, const uint32_t max, uint32_t *skipped)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- ENSURE_POSIX(min <= max, S2N_ERR_SAFETY);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE(min <= max, S2N_ERR_SAFETY);
uint32_t skip = 0;
while (stuffer->read_cursor < stuffer->write_cursor && skip < max) {
@@ -136,33 +132,33 @@ int s2n_stuffer_skip_expected_char(struct s2n_stuffer *stuffer, const char expec
break;
}
}
- ENSURE_POSIX(skip >= min, S2N_ERR_STUFFER_NOT_FOUND);
+ POSIX_ENSURE(skip >= min, S2N_ERR_STUFFER_NOT_FOUND);
if(skipped != NULL) *skipped = skip;
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
return S2N_SUCCESS;
}
/* Read a line of text. Agnostic to LF or CR+LF line endings. */
int s2n_stuffer_read_line(struct s2n_stuffer *stuffer, struct s2n_stuffer *token)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- PRECONDITION_POSIX(s2n_stuffer_validate(token));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(token));
/* Consume an LF terminated line */
- GUARD(s2n_stuffer_read_token(stuffer, token, '\n'));
+ POSIX_GUARD(s2n_stuffer_read_token(stuffer, token, '\n'));
/* Snip off the carriage return if it's present */
if ((s2n_stuffer_data_available(token) > 0) && (token->blob.data[(token->write_cursor - 1)] == '\r')) {
token->write_cursor--;
}
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
- POSTCONDITION_POSIX(s2n_stuffer_validate(token));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(token));
return S2N_SUCCESS;
}
int s2n_stuffer_read_token(struct s2n_stuffer *stuffer, struct s2n_stuffer *token, char delim)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- PRECONDITION_POSIX(s2n_stuffer_validate(token));
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_PRECONDITION(s2n_stuffer_validate(token));
uint32_t token_size = 0;
while ((stuffer->read_cursor + token_size) < stuffer->write_cursor) {
@@ -172,23 +168,37 @@ int s2n_stuffer_read_token(struct s2n_stuffer *stuffer, struct s2n_stuffer *toke
token_size++;
}
- GUARD(s2n_stuffer_copy(stuffer, token, token_size));
+ POSIX_GUARD(s2n_stuffer_copy(stuffer, token, token_size));
/* Consume the delimiter too */
if (stuffer->read_cursor < stuffer->write_cursor) {
stuffer->read_cursor++;
}
- POSTCONDITION_POSIX(s2n_stuffer_validate(stuffer));
- POSTCONDITION_POSIX(s2n_stuffer_validate(token));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(token));
return S2N_SUCCESS;
}
int s2n_stuffer_alloc_ro_from_string(struct s2n_stuffer *stuffer, const char *str)
{
- PRECONDITION_POSIX(s2n_stuffer_validate(stuffer));
- notnull_check(str);
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE_REF(str);
uint32_t length = strlen(str);
- GUARD(s2n_stuffer_alloc(stuffer, length + 1));
+ POSIX_GUARD(s2n_stuffer_alloc(stuffer, length + 1));
return s2n_stuffer_write_bytes(stuffer, (const uint8_t *)str, length);
}
+
+int s2n_stuffer_init_ro_from_string(struct s2n_stuffer *stuffer, uint8_t *data, uint32_t length)
+{
+ POSIX_PRECONDITION(s2n_stuffer_validate(stuffer));
+ POSIX_ENSURE_REF(data);
+
+ struct s2n_blob data_blob = { 0 };
+ POSIX_GUARD(s2n_blob_init(&data_blob, data, length));
+
+ POSIX_GUARD(s2n_stuffer_init(stuffer, &data_blob));
+ POSIX_GUARD(s2n_stuffer_skip_write(stuffer, length));
+
+ return S2N_SUCCESS;
+}
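
    Unlike s2n_stuffer_alloc_ro_from_string(), the new s2n_stuffer_init_ro_from_string() wraps an existing caller-owned buffer without allocating or copying, and advances the write cursor so the whole buffer is immediately readable. A minimal usage sketch, assuming it is built inside the s2n tree where the internal stuffer and safety headers (and S2N_SUCCESS) are available:

    #include <stdint.h>

    #include "stuffer/s2n_stuffer.h"
    #include "utils/s2n_safety.h"

    /* Wrap an existing, caller-owned buffer as a readable stuffer: no allocation,
     * no copy; the write cursor is advanced so the whole buffer can be read.
     * Assumes len >= 1 so the first byte can be read. */
    static int demo_peek_first_byte(uint8_t *buf, uint32_t len, uint8_t *first_byte)
    {
        struct s2n_stuffer stuffer = { 0 };
        POSIX_GUARD(s2n_stuffer_init_ro_from_string(&stuffer, buf, len));
        POSIX_GUARD(s2n_stuffer_read_uint8(&stuffer, first_byte));
        return S2N_SUCCESS;
    }
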
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_alpn.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_alpn.c
index e66b7c0478..b7fd2678ed 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_alpn.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_alpn.c
@@ -19,6 +19,7 @@
#include "tls/extensions/s2n_client_alpn.h"
#include "tls/extensions/s2n_extension_type.h"
+#include "tls/s2n_protocol_preferences.h"
#include "tls/s2n_tls.h"
#include "tls/s2n_tls_parameters.h"
@@ -48,11 +49,11 @@ static bool s2n_client_alpn_should_send(struct s2n_connection *conn)
static int s2n_client_alpn_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
struct s2n_blob *client_app_protocols;
- GUARD(s2n_connection_get_protocol_preferences(conn, &client_app_protocols));
- notnull_check(client_app_protocols);
+ POSIX_GUARD(s2n_connection_get_protocol_preferences(conn, &client_app_protocols));
+ POSIX_ENSURE_REF(client_app_protocols);
- GUARD(s2n_stuffer_write_uint16(out, client_app_protocols->size));
- GUARD(s2n_stuffer_write(out, client_app_protocols));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, client_app_protocols->size));
+ POSIX_GUARD(s2n_stuffer_write(out, client_app_protocols));
return S2N_SUCCESS;
}
@@ -60,18 +61,17 @@ static int s2n_client_alpn_send(struct s2n_connection *conn, struct s2n_stuffer
static int s2n_client_alpn_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
uint16_t size_of_all;
- struct s2n_stuffer client_protos = {0};
struct s2n_stuffer server_protos = {0};
struct s2n_blob *server_app_protocols;
- GUARD(s2n_connection_get_protocol_preferences(conn, &server_app_protocols));
+ POSIX_GUARD(s2n_connection_get_protocol_preferences(conn, &server_app_protocols));
if (!server_app_protocols->size) {
/* No protocols configured, nothing to do */
return S2N_SUCCESS;
}
- GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
if (size_of_all > s2n_stuffer_data_available(extension) || size_of_all < 3) {
/* Malformed length, ignore the extension */
return S2N_SUCCESS;
@@ -80,38 +80,26 @@ static int s2n_client_alpn_recv(struct s2n_connection *conn, struct s2n_stuffer
struct s2n_blob client_app_protocols = { 0 };
client_app_protocols.size = size_of_all;
client_app_protocols.data = s2n_stuffer_raw_read(extension, size_of_all);
- notnull_check(client_app_protocols.data);
+ POSIX_ENSURE_REF(client_app_protocols.data);
/* Find a matching protocol */
- GUARD(s2n_stuffer_init(&client_protos, &client_app_protocols));
- GUARD(s2n_stuffer_write(&client_protos, &client_app_protocols));
- GUARD(s2n_stuffer_init(&server_protos, server_app_protocols));
- GUARD(s2n_stuffer_write(&server_protos, server_app_protocols));
-
- while (s2n_stuffer_data_available(&server_protos)) {
- uint8_t length;
- uint8_t server_protocol[255];
- GUARD(s2n_stuffer_read_uint8(&server_protos, &length));
- GUARD(s2n_stuffer_read_bytes(&server_protos, server_protocol, length));
-
- while (s2n_stuffer_data_available(&client_protos)) {
- uint8_t client_length;
- GUARD(s2n_stuffer_read_uint8(&client_protos, &client_length));
- S2N_ERROR_IF(client_length > s2n_stuffer_data_available(&client_protos), S2N_ERR_BAD_MESSAGE);
- if (client_length != length) {
- GUARD(s2n_stuffer_skip_read(&client_protos, client_length));
- } else {
- uint8_t client_protocol[255];
- GUARD(s2n_stuffer_read_bytes(&client_protos, client_protocol, client_length));
- if (memcmp(client_protocol, server_protocol, client_length) == 0) {
- memcpy_check(conn->application_protocol, client_protocol, client_length);
- conn->application_protocol[client_length] = '\0';
- return S2N_SUCCESS;
- }
- }
+ POSIX_GUARD(s2n_stuffer_init(&server_protos, server_app_protocols));
+ POSIX_GUARD(s2n_stuffer_skip_write(&server_protos, server_app_protocols->size));
+
+ while (s2n_stuffer_data_available(&server_protos) > 0) {
+ struct s2n_blob server_protocol = { 0 };
+ POSIX_ENSURE(s2n_result_is_ok(s2n_protocol_preferences_read(&server_protos, &server_protocol)),
+ S2N_ERR_BAD_MESSAGE);
+
+ bool is_match = false;
+ POSIX_ENSURE(s2n_result_is_ok(s2n_protocol_preferences_contain(&client_app_protocols, &server_protocol, &is_match)),
+ S2N_ERR_BAD_MESSAGE);
+
+ if (is_match) {
+ POSIX_CHECKED_MEMCPY(conn->application_protocol, server_protocol.data, server_protocol.size);
+ conn->application_protocol[server_protocol.size] = '\0';
+ return S2N_SUCCESS;
}
-
- GUARD(s2n_stuffer_reread(&client_protos));
}
return S2N_SUCCESS;
}
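
    The rewritten s2n_client_alpn_recv() delegates iteration and matching to s2n_protocol_preferences_read()/s2n_protocol_preferences_contain() instead of open-coding nested stuffer loops. The wire format being walked is the RFC 7301 ProtocolNameList: a 16-bit list length followed by 8-bit length-prefixed protocol names. A standalone sketch of that layout:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* ProtocolNameList length 0x000c, then "h2" and "http/1.1" */
        const uint8_t ext[] = { 0x00, 0x0c,
                                0x02, 'h', '2',
                                0x08, 'h', 't', 't', 'p', '/', '1', '.', '1' };

        size_t cursor = 0;
        uint16_t list_len = (uint16_t)((ext[0] << 8) | ext[1]);
        cursor += 2;

        while (cursor < (size_t)(2 + list_len)) {
            uint8_t name_len = ext[cursor++];
            printf("protocol: %.*s\n", (int)name_len, (const char *)&ext[cursor]);
            cursor += name_len;
        }
        return 0;
    }
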
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_early_data_indication.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_early_data_indication.c
new file mode 100644
index 0000000000..3cfea591cd
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_early_data_indication.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "api/s2n.h"
+
+#include "tls/extensions/s2n_early_data_indication.h"
+
+#include "tls/extensions/s2n_client_psk.h"
+#include "tls/s2n_cipher_suites.h"
+#include "tls/s2n_early_data.h"
+#include "tls/s2n_protocol_preferences.h"
+#include "tls/s2n_tls13.h"
+#include "utils/s2n_safety.h"
+
+/* S2N determines the handshake type after the ServerHello, but that will be
+ * too late to handle the early data + middlebox compatibility case:
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-D.4
+ *# - If not offering early data, the client sends a dummy
+ *# change_cipher_spec record (see the third paragraph of Section 5)
+ *# immediately before its second flight. This may either be before
+ *# its second ClientHello or before its encrypted handshake flight.
+ *# If offering early data, the record is placed immediately after the
+ *# first ClientHello.
+ *
+ * We need to set the handshake type flags in question during the ClientHello.
+ * This will require special [INITIAL | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS]
+ * entries in the handshake arrays.
+ */
+static S2N_RESULT s2n_setup_middlebox_compat_for_early_data(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+
+ if (s2n_is_middlebox_compat_enabled(conn)) {
+ RESULT_GUARD(s2n_handshake_type_set_tls13_flag(conn, MIDDLEBOX_COMPAT));
+ RESULT_GUARD(s2n_handshake_type_set_tls13_flag(conn, EARLY_CLIENT_CCS));
+ }
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_early_data_config_is_possible(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+
+ struct s2n_psk *first_psk = NULL;
+ RESULT_GUARD(s2n_array_get(&conn->psk_params.psk_list, 0, (void**) &first_psk));
+ RESULT_ENSURE_REF(first_psk);
+
+ struct s2n_early_data_config *early_data_config = &first_psk->early_data_config;
+
+ /* Must support early data */
+ RESULT_ENSURE_GT(early_data_config->max_early_data_size, 0);
+
+    /* Early data must require a protocol version that we could negotiate */
+ RESULT_ENSURE_GTE(s2n_connection_get_protocol_version(conn), early_data_config->protocol_version);
+ RESULT_ENSURE_GTE(s2n_connection_get_protocol_version(conn), S2N_TLS13);
+
+ const struct s2n_cipher_preferences *cipher_preferences = NULL;
+ RESULT_GUARD_POSIX(s2n_connection_get_cipher_preferences(conn, &cipher_preferences));
+ RESULT_ENSURE_REF(cipher_preferences);
+
+ /* Early data must require a supported cipher */
+ bool match = false;
+ for (uint8_t i = 0; i < cipher_preferences->count; i++) {
+ if (cipher_preferences->suites[i] == early_data_config->cipher_suite) {
+ match = true;
+ break;
+ }
+ }
+ RESULT_ENSURE_EQ(match, true);
+
+ /* If early data specifies an application protocol, it must be supported by protocol preferences */
+ if (early_data_config->application_protocol.size > 0) {
+ struct s2n_blob *application_protocols = NULL;
+ RESULT_GUARD_POSIX(s2n_connection_get_protocol_preferences(conn, &application_protocols));
+ RESULT_ENSURE_REF(application_protocols);
+
+ match = false;
+ RESULT_GUARD(s2n_protocol_preferences_contain(application_protocols, &early_data_config->application_protocol, &match));
+ RESULT_ENSURE_EQ(match, true);
+ }
+
+ return S2N_RESULT_OK;
+}
+
+static bool s2n_client_early_data_indication_should_send(struct s2n_connection *conn)
+{
+ return s2n_result_is_ok(s2n_early_data_config_is_possible(conn))
+ && conn && conn->early_data_expected
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# A client MUST NOT include the
+ *# "early_data" extension in its followup ClientHello.
+ **/
+ && !s2n_is_hello_retry_handshake(conn)
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# When a PSK is used and early data is allowed for that PSK, the client
+ *# can send Application Data in its first flight of messages. If the
+ *# client opts to do so, it MUST supply both the "pre_shared_key" and
+ *# "early_data" extensions.
+ */
+ && s2n_client_psk_extension.should_send(conn);
+}
+
+static int s2n_client_early_data_indication_is_missing(struct s2n_connection *conn)
+{
+ if (conn->early_data_state != S2N_EARLY_DATA_REJECTED) {
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_NOT_REQUESTED));
+ }
+ return S2N_SUCCESS;
+}
+
+/**
+ * The client version of this extension is empty, so we don't read/write any data.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# The "extension_data" field of this extension contains an
+ *# "EarlyDataIndication" value.
+ *#
+ *# struct {} Empty;
+ *#
+ *# struct {
+ *# select (Handshake.msg_type) {
+ **
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# case client_hello: Empty;
+ **
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# };
+ *# } EarlyDataIndication;
+ **/
+
+static int s2n_client_early_data_indication_send(struct s2n_connection *conn, struct s2n_stuffer *out)
+{
+ POSIX_GUARD_RESULT(s2n_setup_middlebox_compat_for_early_data(conn));
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_REQUESTED));
+
+ /* Set the cipher suite for early data */
+ struct s2n_psk *first_psk = NULL;
+ POSIX_GUARD_RESULT(s2n_array_get(&conn->psk_params.psk_list, 0, (void**) &first_psk));
+ POSIX_ENSURE_REF(first_psk);
+ conn->secure.cipher_suite = first_psk->early_data_config.cipher_suite;
+
+ return S2N_SUCCESS;
+}
+
+static int s2n_client_early_data_indiction_recv(struct s2n_connection *conn, struct s2n_stuffer *in)
+{
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# A client MUST NOT include the
+ *# "early_data" extension in its followup ClientHello.
+ */
+ POSIX_ENSURE(conn->handshake.message_number == 0, S2N_ERR_UNSUPPORTED_EXTENSION);
+
+ /* Although technically we could NOT set the [MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] handshake type
+ * for the server because the server ignores the Client CCS message state, doing so would mean that
+ * the client and server state machines would be out of sync and potentially cause confusion.
+ */
+ POSIX_GUARD_RESULT(s2n_setup_middlebox_compat_for_early_data(conn));
+
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_REQUESTED));
+ return S2N_SUCCESS;
+}
+
+const s2n_extension_type s2n_client_early_data_indication_extension = {
+ .iana_value = TLS_EXTENSION_EARLY_DATA,
+ .is_response = false,
+ .send = s2n_client_early_data_indication_send,
+ .recv = s2n_client_early_data_indiction_recv,
+ .should_send = s2n_client_early_data_indication_should_send,
+ .if_missing = s2n_client_early_data_indication_is_missing,
+};
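
    s2n_client_early_data_indication_should_send() combines several requirements: the application must have requested early data, the first PSK must allow it, the PSK's cipher suite and optional ALPN value must be locally supported, TLS 1.3 must be possible, and the extension is never sent in the second ClientHello of a retry. A standalone sketch of that decision as a pure predicate; the struct and field names below are invented for illustration and are not part of s2n:

    #include <stdbool.h>
    #include <stdio.h>

    struct early_data_offer_inputs {        /* hypothetical summary of the real checks */
        bool early_data_expected;           /* application asked for early data */
        bool is_hello_retry;                /* second ClientHello after HelloRetryRequest */
        bool sending_psk_extension;         /* pre_shared_key will be offered */
        unsigned max_early_data_size;       /* from the first PSK's early data config */
        bool cipher_suite_supported;        /* PSK's suite is in our preferences */
        bool protocol_at_least_tls13;
        bool alpn_compatible;               /* PSK's ALPN absent or in our list */
    };

    static bool should_offer_early_data(const struct early_data_offer_inputs *in)
    {
        return in->early_data_expected
            && !in->is_hello_retry           /* MUST NOT appear in the follow-up ClientHello */
            && in->sending_psk_extension     /* early_data requires pre_shared_key */
            && in->max_early_data_size > 0
            && in->cipher_suite_supported
            && in->protocol_at_least_tls13
            && in->alpn_compatible;
    }

    int main(void)
    {
        struct early_data_offer_inputs in = {
            .early_data_expected = true,
            .is_hello_retry = false,
            .sending_psk_extension = true,
            .max_early_data_size = 4096,
            .cipher_suite_supported = true,
            .protocol_at_least_tls13 = true,
            .alpn_compatible = true,
        };
        printf("offer early data: %s\n", should_offer_early_data(&in) ? "yes" : "no");
        return 0;
    }
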
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_ems.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_ems.c
new file mode 100644
index 0000000000..36ec3d339f
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_ems.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include <sys/param.h>
+#include <stdint.h>
+
+#include "tls/s2n_tls.h"
+#include "tls/extensions/s2n_ems.h"
+
+#include "utils/s2n_safety.h"
+
+static int s2n_client_ems_recv(struct s2n_connection *conn, struct s2n_stuffer *extension);
+static bool s2n_client_ems_should_send(struct s2n_connection *conn);
+
+/**
+ *= https://tools.ietf.org/rfc/rfc7627#section-5.1
+ *#
+ *# This document defines a new TLS extension, "extended_master_secret"
+ *# (with extension type 0x0017), which is used to signal both client and
+ *# server to use the extended master secret computation. The
+ *# "extension_data" field of this extension is empty. Thus, the entire
+ *# encoding of the extension is 00 17 00 00 (in hexadecimal.)
+ **/
+const s2n_extension_type s2n_client_ems_extension = {
+ .iana_value = TLS_EXTENSION_EMS,
+ .is_response = false,
+ .send = s2n_extension_send_noop,
+ .recv = s2n_client_ems_recv,
+ .should_send = s2n_client_ems_should_send,
+ .if_missing = s2n_extension_noop_if_missing,
+};
+
+static int s2n_client_ems_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
+{
+ POSIX_ENSURE_REF(conn);
+
+ /* Read nothing. The extension just needs to exist. */
+ conn->ems_negotiated = true;
+
+ return S2N_SUCCESS;
+}
+
+static bool s2n_client_ems_should_send(struct s2n_connection *conn)
+{
+ /* Don't send this extension if the previous session did not negotiate EMS */
+ if (conn->set_session && !conn->ems_negotiated) {
+ return false;
+ } else {
+ return true;
+ }
+}
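
    As the RFC 7627 quote above notes, the extension body is empty, so the entire extension on the wire is the 16-bit type 0x0017 followed by a 16-bit zero length. A standalone sketch that produces exactly those four bytes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint16_t type = 0x0017;   /* extended_master_secret */
        const uint16_t length = 0;      /* "extension_data" is empty */

        uint8_t wire[4] = {
            (uint8_t)(type >> 8), (uint8_t)(type & 0xff),
            (uint8_t)(length >> 8), (uint8_t)(length & 0xff),
        };

        /* Prints: 00 17 00 00 */
        for (size_t i = 0; i < sizeof(wire); i++) {
            printf("%02x ", (unsigned)wire[i]);
        }
        printf("\n");
        return 0;
    }
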
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_key_share.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_key_share.c
index 178f2e9c1d..6073263014 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_key_share.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_key_share.c
@@ -24,8 +24,6 @@
#include "tls/s2n_tls13.h"
#include "pq-crypto/s2n_pq.h"
-#define S2N_IS_KEY_SHARE_LIST_EMPTY(preferred_key_shares) (preferred_key_shares & 1)
-#define S2N_IS_KEY_SHARE_REQUESTED(preferred_key_shares, i) ((preferred_key_shares >> (i + 1)) & 1)
/**
* Specified in https://tools.ietf.org/html/rfc8446#section-4.2.8
* "The "key_share" extension contains the endpoint's cryptographic parameters."
@@ -54,64 +52,57 @@ static int s2n_client_key_share_recv(struct s2n_connection *conn, struct s2n_stu
const s2n_extension_type s2n_client_key_share_extension = {
.iana_value = TLS_EXTENSION_KEY_SHARE,
+ .minimum_version = S2N_TLS13,
.is_response = false,
.send = s2n_client_key_share_send,
.recv = s2n_client_key_share_recv,
- .should_send = s2n_extension_send_if_tls13_connection,
+ .should_send = s2n_extension_always_send,
.if_missing = s2n_extension_noop_if_missing,
};
-static int s2n_generate_preferred_ecc_key_shares(struct s2n_connection *conn, struct s2n_stuffer *out)
+static int s2n_generate_default_ecc_key_share(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
- uint8_t preferred_key_shares = conn->preferred_key_shares;
- struct s2n_ecc_evp_params *ecc_evp_params = NULL;
-
+ POSIX_ENSURE_REF(conn);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
- /* If lsb is set, skip keyshare generation for all curve */
- if (S2N_IS_KEY_SHARE_LIST_EMPTY(preferred_key_shares)) {
- return S2N_SUCCESS;
- }
+ /* We only ever send a single EC key share: either the share requested by the server
+ * during a retry, or the most preferred share according to local preferences.
+ */
+ struct s2n_ecc_evp_params *client_params = &conn->kex_params.client_ecc_evp_params;
+ if (s2n_is_hello_retry_handshake(conn)) {
+ const struct s2n_ecc_named_curve *server_curve = conn->kex_params.server_ecc_evp_params.negotiated_curve;
- for (size_t i = 0; i < ecc_pref->count; i++) {
- /* If a bit in the bitmap (minus the lsb) is set, generate keyshare for the corresponding curve */
- if (S2N_IS_KEY_SHARE_REQUESTED(preferred_key_shares, i)) {
- ecc_evp_params = &conn->secure.client_ecc_evp_params[i];
- ecc_evp_params->negotiated_curve = ecc_pref->ecc_curves[i];
- GUARD(s2n_ecdhe_parameters_send(ecc_evp_params, out));
+ /* If the server did not request a specific ECC keyshare, don't send one */
+ if (!server_curve) {
+ return S2N_SUCCESS;
}
- }
- return S2N_SUCCESS;
-}
-
-static int s2n_generate_default_ecc_key_share(struct s2n_connection *conn, struct s2n_stuffer *out)
-{
- notnull_check(conn);
- const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ /* If the server requested a new ECC keyshare, free the old one */
+ if (server_curve != client_params->negotiated_curve) {
+ POSIX_GUARD(s2n_ecc_evp_params_free(client_params));
+ }
- struct s2n_ecc_evp_params *ecc_evp_params = NULL;
- ecc_evp_params = &conn->secure.client_ecc_evp_params[0];
- ecc_evp_params->negotiated_curve = ecc_pref->ecc_curves[0];
- GUARD(s2n_ecdhe_parameters_send(ecc_evp_params, out));
+ client_params->negotiated_curve = server_curve;
+ } else {
+ client_params->negotiated_curve = ecc_pref->ecc_curves[0];
+ }
+ POSIX_GUARD(s2n_ecdhe_parameters_send(client_params, out));
return S2N_SUCCESS;
}
-static int s2n_generate_pq_hybrid_key_share(struct s2n_stuffer *out, struct s2n_kem_group_params *kem_group_params) {
- notnull_check(out);
- notnull_check(kem_group_params);
+static int s2n_generate_pq_hybrid_key_share(struct s2n_stuffer *out, struct s2n_kem_group_params *kem_group_params)
+{
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(kem_group_params);
/* This function should never be called when PQ is disabled */
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
const struct s2n_kem_group *kem_group = kem_group_params->kem_group;
- notnull_check(kem_group);
+ POSIX_ENSURE_REF(kem_group);
/* The structure of the PQ share is:
* IANA ID (2 bytes)
@@ -120,29 +111,28 @@ static int s2n_generate_pq_hybrid_key_share(struct s2n_stuffer *out, struct s2n_
* || ECC key share (variable bytes)
* || size of PQ key share (2 bytes)
* || PQ key share (variable bytes) */
- GUARD(s2n_stuffer_write_uint16(out, kem_group->iana_id));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, kem_group->iana_id));
struct s2n_stuffer_reservation total_share_size = {0};
- GUARD(s2n_stuffer_reserve_uint16(out, &total_share_size));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &total_share_size));
struct s2n_ecc_evp_params *ecc_params = &kem_group_params->ecc_params;
ecc_params->negotiated_curve = kem_group->curve;
- GUARD(s2n_stuffer_write_uint16(out, ecc_params->negotiated_curve->share_size));
- GUARD(s2n_ecc_evp_generate_ephemeral_key(ecc_params));
- GUARD(s2n_ecc_evp_write_params_point(ecc_params, out));
+ POSIX_GUARD_RESULT(s2n_ecdhe_send_public_key(ecc_params, out));
struct s2n_kem_params *kem_params = &kem_group_params->kem_params;
kem_params->kem = kem_group->kem;
- GUARD(s2n_kem_send_public_key(out, kem_params));
+ POSIX_GUARD(s2n_kem_send_public_key(out, kem_params));
- GUARD(s2n_stuffer_write_vector_size(&total_share_size));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&total_share_size));
return S2N_SUCCESS;
}
-static int s2n_generate_default_pq_hybrid_key_share(struct s2n_connection *conn, struct s2n_stuffer *out) {
- notnull_check(conn);
- notnull_check(out);
+static int s2n_generate_default_pq_hybrid_key_share(struct s2n_connection *conn, struct s2n_stuffer *out)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(out);
/* Client should skip sending PQ groups/key shares if PQ is disabled */
if (!s2n_pq_is_enabled()) {
@@ -150,206 +140,113 @@ static int s2n_generate_default_pq_hybrid_key_share(struct s2n_connection *conn,
}
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
if (kem_pref->tls13_kem_group_count == 0) {
return S2N_SUCCESS;
}
- /* We only send a single PQ key share - the highest preferred one */
- struct s2n_kem_group_params *kem_group_params = &conn->secure.client_kem_group_params[0];
- kem_group_params->kem_group = kem_pref->tls13_kem_groups[0];
-
- GUARD(s2n_generate_pq_hybrid_key_share(out, kem_group_params));
-
- return S2N_SUCCESS;
-}
-
-static int s2n_wipe_all_client_keyshares(struct s2n_connection *conn) {
- notnull_check(conn);
-
- const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
-
- const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
-
- for (size_t i = 0; i < ecc_pref->count; i++) {
- GUARD(s2n_ecc_evp_params_free(&conn->secure.client_ecc_evp_params[i]));
- conn->secure.client_ecc_evp_params[i].negotiated_curve = NULL;
- }
-
- for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
- GUARD(s2n_kem_group_free(&conn->secure.client_kem_group_params[i]));
- conn->secure.client_kem_group_params[i].kem_group = NULL;
- conn->secure.client_kem_group_params[i].kem_params.kem = NULL;
- conn->secure.client_kem_group_params[i].ecc_params.negotiated_curve = NULL;
- }
-
- return S2N_SUCCESS;
-}
-
-static int s2n_send_hrr_ecc_keyshare(struct s2n_connection *conn, struct s2n_stuffer *out)
-{
- notnull_check(conn);
- const struct s2n_ecc_named_curve *server_negotiated_curve = NULL;
- struct s2n_ecc_evp_params *ecc_evp_params = NULL;
-
- const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
-
- server_negotiated_curve = conn->secure.server_ecc_evp_params.negotiated_curve;
- ENSURE_POSIX(server_negotiated_curve != NULL, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(s2n_ecc_preferences_includes_curve(ecc_pref, server_negotiated_curve->iana_id),
- S2N_ERR_INVALID_HELLO_RETRY);
+ /* We only ever send a single PQ key share: either the share requested by the server
+ * during a retry, or the most preferred share according to local preferences.
+ */
+ struct s2n_kem_group_params *client_params = &conn->kex_params.client_kem_group_params;
+ if (s2n_is_hello_retry_handshake(conn)) {
+ const struct s2n_kem_group *server_group = conn->kex_params.server_kem_group_params.kem_group;
- for (size_t i = 0; i < ecc_pref->count; i++) {
- if (ecc_pref->ecc_curves[i]->iana_id == server_negotiated_curve->iana_id) {
- ecc_evp_params = &conn->secure.client_ecc_evp_params[i];
- ENSURE_POSIX(ecc_evp_params->evp_pkey == NULL, S2N_ERR_INVALID_HELLO_RETRY);
+ /* If the server did not request a specific PQ keyshare, don't send one */
+ if (!server_group) {
+ return S2N_SUCCESS;
}
- }
-
- /* None of the previously generated keyshares were selected for negotiation, so wipe them */
- GUARD(s2n_wipe_all_client_keyshares(conn));
- /* Generate the keyshare for the server negotiated curve */
- ecc_evp_params->negotiated_curve = server_negotiated_curve;
- GUARD(s2n_ecdhe_parameters_send(ecc_evp_params, out));
-
- return S2N_SUCCESS;
-}
-
-static int s2n_send_hrr_pq_hybrid_keyshare(struct s2n_connection *conn, struct s2n_stuffer *out) {
- notnull_check(conn);
- notnull_check(out);
-
- /* If PQ is disabled, the client should not have sent any PQ IDs
- * in the supported_groups list of the initial ClientHello */
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
- const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
-
- const struct s2n_kem_group *server_negotiated_kem_group = conn->secure.server_kem_group_params.kem_group;
- ENSURE_POSIX(server_negotiated_kem_group != NULL, S2N_ERR_INVALID_HELLO_RETRY);
- ENSURE_POSIX(s2n_kem_preferences_includes_tls13_kem_group(kem_pref, server_negotiated_kem_group->iana_id),
- S2N_ERR_INVALID_HELLO_RETRY);
- struct s2n_kem_group_params *kem_group_params = NULL;
-
- for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
- if (kem_pref->tls13_kem_groups[i]->iana_id == server_negotiated_kem_group->iana_id) {
- kem_group_params = &conn->secure.client_kem_group_params[i];
- ENSURE_POSIX(kem_group_params->kem_group == NULL, S2N_ERR_INVALID_HELLO_RETRY);
- ENSURE_POSIX(kem_group_params->ecc_params.evp_pkey == NULL, S2N_ERR_INVALID_HELLO_RETRY);
- ENSURE_POSIX(kem_group_params->kem_params.private_key.data == NULL, S2N_ERR_INVALID_HELLO_RETRY);
+ /* If the server requested a new PQ keyshare, free the old one */
+ if (client_params->kem_group != server_group) {
+ POSIX_GUARD(s2n_kem_group_free(client_params));
}
- }
-
- /* None of the previously generated keyshares were selected for negotiation, so wipe them */
- GUARD(s2n_wipe_all_client_keyshares(conn));
- /* Generate the keyshare for the server negotiated KEM group */
- kem_group_params->kem_group = server_negotiated_kem_group;
- GUARD(s2n_generate_pq_hybrid_key_share(out, kem_group_params));
-
- return S2N_SUCCESS;
-}
-/* From https://tools.ietf.org/html/rfc8446#section-4.1.2
- * If a "key_share" extension was supplied in the HelloRetryRequest,
- * replace the list of shares with a list containing a single
- * KeyShareEntry from the indicated group.*/
-static int s2n_send_hrr_keyshare(struct s2n_connection *conn, struct s2n_stuffer *out) {
- notnull_check(conn);
- notnull_check(out);
-
- if (conn->secure.server_kem_group_params.kem_group != NULL) {
- GUARD(s2n_send_hrr_pq_hybrid_keyshare(conn, out));
+ client_params->kem_group = server_group;
} else {
- GUARD(s2n_send_hrr_ecc_keyshare(conn, out));
+ client_params->kem_group = kem_pref->tls13_kem_groups[0];
}
+ POSIX_GUARD(s2n_generate_pq_hybrid_key_share(out, client_params));
return S2N_SUCCESS;
}
-static int s2n_ecdhe_supported_curves_send(struct s2n_connection *conn, struct s2n_stuffer *out)
-{
- if (!conn->preferred_key_shares) {
- GUARD(s2n_generate_default_ecc_key_share(conn, out));
- return S2N_SUCCESS;
- }
-
- GUARD(s2n_generate_preferred_ecc_key_shares(conn, out));
- return S2N_SUCCESS;
-}
-
static int s2n_client_key_share_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
struct s2n_stuffer_reservation shares_size = {0};
- GUARD(s2n_stuffer_reserve_uint16(out, &shares_size));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &shares_size));
+ POSIX_GUARD(s2n_generate_default_pq_hybrid_key_share(conn, out));
+ POSIX_GUARD(s2n_generate_default_ecc_key_share(conn, out));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&shares_size));
- if (s2n_is_hello_retry_handshake(conn)) {
- GUARD(s2n_send_hrr_keyshare(conn, out));
- } else {
- GUARD(s2n_generate_default_pq_hybrid_key_share(conn, out));
- GUARD(s2n_ecdhe_supported_curves_send(conn, out));
- }
-
- GUARD(s2n_stuffer_write_vector_size(&shares_size));
+ /* We must have written at least one share */
+ POSIX_ENSURE(s2n_stuffer_data_available(out) > shares_size.length, S2N_ERR_BAD_KEY_SHARE);
return S2N_SUCCESS;
}
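The rewritten send path above no longer special-cases HelloRetryRequest: it always reserves the two-byte client_shares length, writes the PQ-hybrid share (if any) followed by the default ECC share, backfills the length, and finally checks that at least one entry was written. A minimal standalone sketch of the resulting RFC 8446 wire layout follows; write_u16/write_entry are local illustrative helpers, not s2n APIs, and the share bytes are made up.

/* Sketch: KeyShareClientHello layout
 *   struct {
 *       KeyShareEntry client_shares<0..2^16-1>;
 *   } KeyShareClientHello;
 * Mirrors the reserve-then-backfill pattern of s2n_stuffer_reserve_uint16 /
 * s2n_stuffer_write_vector_size. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t write_u16(uint8_t *out, size_t pos, uint16_t v)
{
    out[pos] = (uint8_t) (v >> 8);
    out[pos + 1] = (uint8_t) (v & 0xFF);
    return pos + 2;
}

static size_t write_entry(uint8_t *out, size_t pos, uint16_t group,
        const uint8_t *key_exchange, uint16_t len)
{
    pos = write_u16(out, pos, group); /* NamedGroup */
    pos = write_u16(out, pos, len);   /* key_exchange length */
    memcpy(out + pos, key_exchange, len);
    return pos + len;
}

int main(void)
{
    uint8_t out[64] = { 0 };
    const uint8_t fake_share[4] = { 0xAA, 0xBB, 0xCC, 0xDD };

    size_t list_len_pos = 0;       /* reserved, filled in last */
    size_t pos = list_len_pos + 2;
    pos = write_entry(out, pos, 0x001D /* x25519 */, fake_share, sizeof(fake_share));
    write_u16(out, list_len_pos, (uint16_t) (pos - list_len_pos - 2));

    for (size_t i = 0; i < pos; i++) {
        printf("%02x ", (unsigned) out[i]);
    }
    printf("\n");
    return 0;
}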
static int s2n_client_key_share_parse_ecc(struct s2n_stuffer *key_share, const struct s2n_ecc_named_curve *curve,
- struct s2n_ecc_evp_params *ecc_params) {
- notnull_check(key_share);
- notnull_check(curve);
- notnull_check(ecc_params);
+ struct s2n_ecc_evp_params *ecc_params)
+{
+ POSIX_ENSURE_REF(key_share);
+ POSIX_ENSURE_REF(curve);
+ POSIX_ENSURE_REF(ecc_params);
struct s2n_blob point_blob = { 0 };
- GUARD(s2n_ecc_evp_read_params_point(key_share, curve->share_size, &point_blob));
+ POSIX_GUARD(s2n_ecc_evp_read_params_point(key_share, curve->share_size, &point_blob));
/* Ignore curves with points we can't parse */
ecc_params->negotiated_curve = curve;
if (s2n_ecc_evp_parse_params_point(&point_blob, ecc_params) != S2N_SUCCESS) {
ecc_params->negotiated_curve = NULL;
- GUARD(s2n_ecc_evp_params_free(ecc_params));
+ POSIX_GUARD(s2n_ecc_evp_params_free(ecc_params));
}
return S2N_SUCCESS;
}
-static int s2n_client_key_share_recv_ecc(struct s2n_connection *conn, struct s2n_stuffer *key_share,
- uint16_t curve_iana_id, bool *match) {
- notnull_check(conn);
- notnull_check(key_share);
- notnull_check(match);
+static int s2n_client_key_share_recv_ecc(struct s2n_connection *conn, struct s2n_stuffer *key_share, uint16_t curve_iana_id)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(key_share);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
+
+ struct s2n_ecc_evp_params *client_params = &conn->kex_params.client_ecc_evp_params;
const struct s2n_ecc_named_curve *curve = NULL;
- struct s2n_ecc_evp_params *client_ecc_params = NULL;
for (size_t i = 0; i < ecc_pref->count; i++) {
- if (curve_iana_id == ecc_pref->ecc_curves[i]->iana_id) {
- curve = ecc_pref->ecc_curves[i];
- client_ecc_params = &conn->secure.client_ecc_evp_params[i];
+ const struct s2n_ecc_named_curve *supported_curve = ecc_pref->ecc_curves[i];
+ POSIX_ENSURE_REF(supported_curve);
+
+ /* Stop if we reach the current highest priority share.
+ * Any share of lower priority is discarded.
+ */
+ if (client_params->negotiated_curve == supported_curve) {
break;
}
- }
- /* Ignore unsupported curves */
- if (!curve || !client_ecc_params) {
- return S2N_SUCCESS;
+ /* Skip if not supported by the client.
+ * The client must not send shares it doesn't support, but the server
+ * is not required to error if they are encountered.
+ */
+ if (!conn->kex_params.mutually_supported_curves[i]) {
+ continue;
+ }
+
+ /* Stop if we find a match */
+ if (curve_iana_id == supported_curve->iana_id) {
+ curve = supported_curve;
+ break;
+ }
}
- /* Ignore curves that we've already received material for */
- if (client_ecc_params->negotiated_curve) {
+ /* Ignore unsupported curves */
+ if (!curve) {
return S2N_SUCCESS;
}
@@ -358,47 +255,66 @@ static int s2n_client_key_share_recv_ecc(struct s2n_connection *conn, struct s2n
return S2N_SUCCESS;
}
- GUARD(s2n_client_key_share_parse_ecc(key_share, curve, client_ecc_params));
- /* negotiated_curve will be non-NULL if the key share was parsed successfully */
- if (client_ecc_params->negotiated_curve) {
- *match = true;
+ DEFER_CLEANUP(struct s2n_ecc_evp_params new_client_params = { 0 }, s2n_ecc_evp_params_free);
+
+ POSIX_GUARD(s2n_client_key_share_parse_ecc(key_share, curve, &new_client_params));
+ /* negotiated_curve will be NULL if the key share was not parsed successfully */
+ if (!new_client_params.negotiated_curve) {
+ return S2N_SUCCESS;
}
+ POSIX_GUARD(s2n_ecc_evp_params_free(client_params));
+ *client_params = new_client_params;
+
+ ZERO_TO_DISABLE_DEFER_CLEANUP(new_client_params);
return S2N_SUCCESS;
}
-static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, struct s2n_stuffer *key_share,
- uint16_t kem_group_iana_id, bool *match) {
- notnull_check(conn);
- notnull_check(key_share);
- notnull_check(match);
+static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, struct s2n_stuffer *key_share, uint16_t kem_group_iana_id)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(key_share);
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
/* Ignore key share if PQ is not enabled */
if (!s2n_pq_is_enabled()) {
return S2N_SUCCESS;
}
+ struct s2n_kem_group_params *client_params = &conn->kex_params.client_kem_group_params;
+
const struct s2n_kem_group *kem_group = NULL;
- struct s2n_kem_group_params *client_kem_group_params = NULL;
for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
- if (kem_group_iana_id == kem_pref->tls13_kem_groups[i]->iana_id) {
- kem_group = kem_pref->tls13_kem_groups[i];
- client_kem_group_params = &conn->secure.client_kem_group_params[i];
+ const struct s2n_kem_group *supported_group = kem_pref->tls13_kem_groups[i];
+ POSIX_ENSURE_REF(supported_group);
+
+ /* Stop if we reach the current highest priority share.
+ * Any share of lower priority is discarded.
+ */
+ if (client_params->kem_group == supported_group) {
break;
}
- }
- /* Ignore unsupported KEM groups */
- if (!kem_group || !client_kem_group_params) {
- return S2N_SUCCESS;
+ /* Skip if not supported by the client.
+ * The client must not send shares it doesn't support, but the server
+ * is not required to error if they are encountered.
+ */
+ if (!conn->kex_params.mutually_supported_kem_groups[i]) {
+ continue;
+ }
+
+ /* Stop if we find a match */
+ if (kem_group_iana_id == supported_group->iana_id) {
+ kem_group = supported_group;
+ break;
+ }
}
- /* Ignore KEM groups that we've already received material for */
- if (client_kem_group_params->kem_group) {
+ /* Ignore unsupported KEM groups */
+ if (!kem_group) {
return S2N_SUCCESS;
}
@@ -407,85 +323,86 @@ static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, stru
return S2N_SUCCESS;
}
- uint16_t ec_share_size = 0;
- GUARD(s2n_stuffer_read_uint16(key_share, &ec_share_size));
/* Ignore KEM groups with unexpected ECC share sizes */
+ uint16_t ec_share_size = 0;
+ POSIX_GUARD(s2n_stuffer_read_uint16(key_share, &ec_share_size));
if (ec_share_size != kem_group->curve->share_size) {
return S2N_SUCCESS;
}
- GUARD(s2n_client_key_share_parse_ecc(key_share, kem_group->curve, &client_kem_group_params->ecc_params));
+ DEFER_CLEANUP(struct s2n_kem_group_params new_client_params = { 0 }, s2n_kem_group_free);
+ new_client_params.kem_group = kem_group;
+
+ POSIX_GUARD(s2n_client_key_share_parse_ecc(key_share, kem_group->curve, &new_client_params.ecc_params));
/* If we were unable to parse the EC portion of the share, negotiated_curve
* will be NULL, and we should ignore the entire key share. */
- if (!client_kem_group_params->ecc_params.negotiated_curve) {
+ if (!new_client_params.ecc_params.negotiated_curve) {
return S2N_SUCCESS;
}
/* Note: the PQ share size is validated in s2n_kem_recv_public_key() */
/* Ignore groups with PQ public keys we can't parse */
- client_kem_group_params->kem_params.kem = kem_group->kem;
- if (s2n_kem_recv_public_key(key_share, &client_kem_group_params->kem_params) != S2N_SUCCESS) {
- client_kem_group_params->kem_group = NULL;
- client_kem_group_params->kem_params.kem = NULL;
- client_kem_group_params->ecc_params.negotiated_curve = NULL;
- /* s2n_kem_group_free() will free both the ECC and KEM params */
- GUARD(s2n_kem_group_free(client_kem_group_params));
+ new_client_params.kem_params.kem = kem_group->kem;
+ if (s2n_kem_recv_public_key(key_share, &new_client_params.kem_params) != S2N_SUCCESS) {
return S2N_SUCCESS;
}
- client_kem_group_params->kem_group = kem_group;
- *match = true;
+ POSIX_GUARD(s2n_kem_group_free(client_params));
+ *client_params = new_client_params;
+
+ ZERO_TO_DISABLE_DEFER_CLEANUP(new_client_params);
return S2N_SUCCESS;
}
-static int s2n_client_key_share_recv(struct s2n_connection *conn, struct s2n_stuffer *extension) {
- notnull_check(conn);
- notnull_check(extension);
-
- if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
- return S2N_SUCCESS;
- }
-
- const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
-
- const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+/*
+ * We chose our most preferred group of the mutually supported groups while processing the
+ * supported_groups extension. However, our true most preferred group is always the
+ * group that we already have a key share for, since retries are expensive.
+ *
+ * This method modifies our group selection based on what keyshares are available.
+ * It then stores the client keyshare for the selected group, or initiates a retry
+ * if no valid keyshares are available.
+ */
+static int s2n_client_key_share_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(extension);
uint16_t key_shares_size;
- GUARD(s2n_stuffer_read_uint16(extension, &key_shares_size));
- ENSURE_POSIX(s2n_stuffer_data_available(extension) >= key_shares_size, S2N_ERR_BAD_MESSAGE);
-
- uint16_t named_group, share_size;
- bool match_found = false;
- /* bytes_processed is declared as a uint32_t to avoid integer overflow in later calculations */
- uint32_t bytes_processed = 0;
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &key_shares_size));
+ POSIX_ENSURE(s2n_stuffer_data_available(extension) == key_shares_size, S2N_ERR_BAD_MESSAGE);
- while (bytes_processed < key_shares_size) {
- GUARD(s2n_stuffer_read_uint16(extension, &named_group));
- GUARD(s2n_stuffer_read_uint16(extension, &share_size));
+ uint16_t named_group = 0, share_size = 0;
+ struct s2n_blob key_share_blob = { 0 };
+ struct s2n_stuffer key_share = { 0 };
- ENSURE_POSIX(s2n_stuffer_data_available(extension) >= share_size, S2N_ERR_BAD_MESSAGE);
- bytes_processed += share_size + S2N_SIZE_OF_NAMED_GROUP + S2N_SIZE_OF_KEY_SHARE_SIZE;
+ uint16_t keyshare_count = 0;
+    while (s2n_stuffer_data_available(extension) > 0) {
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &named_group));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &share_size));
+ POSIX_ENSURE(s2n_stuffer_data_available(extension) >= share_size, S2N_ERR_BAD_MESSAGE);
- struct s2n_blob key_share_blob = { .size = share_size, .data = s2n_stuffer_raw_read(extension, share_size) };
- notnull_check(key_share_blob.data);
- struct s2n_stuffer key_share = { 0 };
- GUARD(s2n_stuffer_init(&key_share, &key_share_blob));
- GUARD(s2n_stuffer_skip_write(&key_share, share_size));
+ POSIX_GUARD(s2n_blob_init(&key_share_blob,
+ s2n_stuffer_raw_read(extension, share_size), share_size));
+ POSIX_GUARD(s2n_stuffer_init(&key_share, &key_share_blob));
+ POSIX_GUARD(s2n_stuffer_skip_write(&key_share, share_size));
+ keyshare_count++;
/* Try to parse the share as ECC, then as PQ/hybrid; will ignore
* shares for unrecognized groups. */
- GUARD(s2n_client_key_share_recv_ecc(conn, &key_share, named_group, &match_found));
- GUARD(s2n_client_key_share_recv_pq_hybrid(conn, &key_share, named_group, &match_found));
+ POSIX_GUARD(s2n_client_key_share_recv_ecc(conn, &key_share, named_group));
+ POSIX_GUARD(s2n_client_key_share_recv_pq_hybrid(conn, &key_share, named_group));
}
+ /* During a retry, the client should only have sent one keyshare */
+ POSIX_ENSURE(!s2n_is_hello_retry_handshake(conn) || keyshare_count == 1, S2N_ERR_BAD_MESSAGE);
+
/* If there were no matching key shares, then we received an empty key share extension
* or we didn't match a key share with a supported group. We should send a retry. */
- if (!match_found) {
- GUARD(s2n_set_hello_retry_required(conn));
+ struct s2n_ecc_evp_params *client_ecc_params = &conn->kex_params.client_ecc_evp_params;
+ struct s2n_kem_group_params *client_pq_params = &conn->kex_params.client_kem_group_params;
+ if (!client_pq_params->kem_group && !client_ecc_params->negotiated_curve) {
+ POSIX_GUARD(s2n_set_hello_retry_required(conn));
}
return S2N_SUCCESS;
@@ -495,11 +412,11 @@ static int s2n_client_key_share_recv(struct s2n_connection *conn, struct s2n_stu
uint32_t s2n_extensions_client_key_share_size(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
uint32_t s2n_client_key_share_extension_size = S2N_SIZE_OF_EXTENSION_TYPE
+ S2N_SIZE_OF_EXTENSION_DATA_SIZE
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_max_frag_len.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_max_frag_len.c
index 880193a1a1..df5d2e5088 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_max_frag_len.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_max_frag_len.c
@@ -52,13 +52,29 @@ static int s2n_client_max_frag_len_recv(struct s2n_connection *conn, struct s2n_
}
uint8_t mfl_code;
- GUARD(s2n_stuffer_read_uint8(extension, &mfl_code));
- if (mfl_code > S2N_TLS_MAX_FRAG_LEN_4096 || mfl_code_to_length[mfl_code] > S2N_TLS_MAXIMUM_FRAGMENT_LENGTH) {
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &mfl_code));
+
+ /*
+ *= https://tools.ietf.org/rfc/rfc6066#section-4
+ *= type=exception
+ *= reason=For compatibility, we choose to ignore malformed extensions if they are optional
+ *# If a server receives a maximum fragment length negotiation request
+ *# for a value other than the allowed values, it MUST abort the
+ *# handshake with an "illegal_parameter" alert.
+ */
+ if (mfl_code >= s2n_array_len(mfl_code_to_length) || mfl_code_to_length[mfl_code] > S2N_TLS_MAXIMUM_FRAGMENT_LENGTH) {
return S2N_SUCCESS;
}
- conn->mfl_code = mfl_code;
- conn->max_outgoing_fragment_length = mfl_code_to_length[mfl_code];
+ /*
+ *= https://tools.ietf.org/rfc/rfc6066#section-4
+ *# Once a maximum fragment length other than 2^14 has been successfully
+ *# negotiated, the client and server MUST immediately begin fragmenting
+ *# messages (including handshake messages) to ensure that no fragment
+ *# larger than the negotiated length is sent.
+ */
+ conn->negotiated_mfl_code = mfl_code;
+ POSIX_GUARD_RESULT(s2n_connection_set_max_fragment_length(conn, mfl_code_to_length[mfl_code]));
return S2N_SUCCESS;
}
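The new bounds check above only indexes mfl_code_to_length after confirming the code is inside the table, and otherwise ignores the extension rather than aborting (the documented RFC 6066 exception). For reference, RFC 6066 defines codes 1-4 as 2^9-2^12 bytes; the standalone sketch below uses that RFC mapping directly, so the values are illustrative and not necessarily s2n's actual table.

#include <stdint.h>
#include <stdio.h>

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
    /* RFC 6066 MaxFragmentLength codes; index 0 is unused here */
    const uint16_t code_to_length[] = { 0, 512, 1024, 2048, 4096 };

    for (unsigned code = 0; code < 8; code++) {
        if (code >= ARRAY_LEN(code_to_length) || code_to_length[code] == 0) {
            printf("code %u: ignored\n", code);
            continue;
        }
        printf("code %u: negotiate %u-byte fragments\n", code, (unsigned) code_to_length[code]);
    }
    return 0;
}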
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_pq_kem.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_pq_kem.c
index 83c4c8f1d0..aae1fe8256 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_pq_kem.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_pq_kem.c
@@ -49,12 +49,12 @@ static bool s2n_client_pq_kem_should_send(struct s2n_connection *conn)
static int s2n_client_pq_kem_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
const struct s2n_kem_preferences *kem_preferences = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_preferences));
- notnull_check(kem_preferences);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_preferences));
+ POSIX_ENSURE_REF(kem_preferences);
- GUARD(s2n_stuffer_write_uint16(out, kem_preferences->kem_count * sizeof(kem_extension_size)));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, kem_preferences->kem_count * sizeof(kem_extension_size)));
for (int i = 0; i < kem_preferences->kem_count; i++) {
- GUARD(s2n_stuffer_write_uint16(out, kem_preferences->kems[i]->kem_extension_id));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, kem_preferences->kems[i]->kem_extension_id));
}
return S2N_SUCCESS;
@@ -63,14 +63,14 @@ static int s2n_client_pq_kem_send(struct s2n_connection *conn, struct s2n_stuffe
static int s2n_client_pq_kem_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
uint16_t size_of_all;
- struct s2n_blob *proposed_kems = &conn->secure.client_pq_kem_extension;
+ struct s2n_blob *proposed_kems = &conn->kex_params.client_pq_kem_extension;
/* Ignore extension if PQ is disabled */
if (!s2n_pq_is_enabled()) {
return S2N_SUCCESS;
}
- GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
if (size_of_all > s2n_stuffer_data_available(extension) || size_of_all % sizeof(kem_extension_size)) {
/* Malformed length, ignore the extension */
return S2N_SUCCESS;
@@ -78,7 +78,7 @@ static int s2n_client_pq_kem_recv(struct s2n_connection *conn, struct s2n_stuffe
proposed_kems->size = size_of_all;
proposed_kems->data = s2n_stuffer_raw_read(extension, proposed_kems->size);
- notnull_check(proposed_kems->data);
+ POSIX_ENSURE_REF(proposed_kems->data);
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_psk.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_psk.c
index d457829022..f8278f36b4 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_psk.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_psk.c
@@ -27,30 +27,49 @@
#define SIZE_OF_BINDER_SIZE sizeof(uint8_t)
#define SIZE_OF_BINDER_LIST_SIZE sizeof(uint16_t)
-#define MAX_NUM_OF_PSK_IDENTITIES 100
+
+/* To avoid a DoS attack triggered by decrypting too many session tickets,
+ * set a limit on the number of tickets we will attempt to decrypt before giving up.
+ * We may want to make this configurable someday, but just set a reasonable maximum for now. */
+#define MAX_REJECTED_TICKETS 3
static int s2n_client_psk_send(struct s2n_connection *conn, struct s2n_stuffer *out);
static int s2n_client_psk_recv(struct s2n_connection *conn, struct s2n_stuffer *extension);
+static int s2n_client_psk_is_missing(struct s2n_connection *conn);
const s2n_extension_type s2n_client_psk_extension = {
.iana_value = TLS_EXTENSION_PRE_SHARED_KEY,
+ .minimum_version = S2N_TLS13,
.is_response = false,
.send = s2n_client_psk_send,
.recv = s2n_client_psk_recv,
.should_send = s2n_client_psk_should_send,
- .if_missing = s2n_extension_noop_if_missing,
+ .if_missing = s2n_client_psk_is_missing,
};
+int s2n_client_psk_is_missing(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+
+ /* If the PSK extension is missing, we must not have received
+ * a request for early data.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# When a PSK is used and early data is allowed for that PSK, the client
+ *# can send Application Data in its first flight of messages. If the
+ *# client opts to do so, it MUST supply both the "pre_shared_key" and
+ *# "early_data" extensions.
+ */
+ POSIX_ENSURE(conn->early_data_state != S2N_EARLY_DATA_REQUESTED, S2N_ERR_UNSUPPORTED_EXTENSION);
+ return S2N_SUCCESS;
+}
+
bool s2n_client_psk_should_send(struct s2n_connection *conn)
{
if (conn == NULL) {
return false;
}
- if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
- return false;
- }
-
/* If this is NOT the second ClientHello after a retry, then all PSKs are viable.
* Send the extension if any PSKs are configured.
*/
@@ -72,22 +91,62 @@ bool s2n_client_psk_should_send(struct s2n_connection *conn)
return false;
}
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.11.1
+ *# The "obfuscated_ticket_age"
+ *# field of each PskIdentity contains an obfuscated version of the
+ *# ticket age formed by taking the age in milliseconds and adding the
+ *# "ticket_age_add" value that was included with the ticket (see
+ *# Section 4.6.1), modulo 2^32.
+*/
+static S2N_RESULT s2n_generate_obfuscated_ticket_age(struct s2n_psk *psk, uint64_t current_time, uint32_t *output)
+{
+ RESULT_ENSURE_REF(psk);
+ RESULT_ENSURE_MUT(output);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.11
+ *# For identities
+ *# established externally, an obfuscated_ticket_age of 0 SHOULD be
+ *# used,
+ **/
+ if (psk->type == S2N_PSK_TYPE_EXTERNAL) {
+ *output = 0;
+ return S2N_RESULT_OK;
+ }
+
+ RESULT_ENSURE(current_time >= psk->ticket_issue_time, S2N_ERR_SAFETY);
+
+ /* Calculate ticket age */
+ uint64_t ticket_age_in_nanos = current_time - psk->ticket_issue_time;
+
+ /* Convert ticket age to milliseconds */
+ uint64_t ticket_age_in_millis = ticket_age_in_nanos / ONE_MILLISEC_IN_NANOS;
+ RESULT_ENSURE(ticket_age_in_millis <= UINT32_MAX, S2N_ERR_SAFETY);
+
+ /* Add the ticket_age_add value to the ticket age in milliseconds. The resulting uint32_t value
+ * may wrap, resulting in the modulo 2^32 operation. */
+ *output = ticket_age_in_millis + psk->ticket_age_add;
+
+ return S2N_RESULT_OK;
+}
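The helper above converts the ticket age from nanoseconds to milliseconds and then lets uint32_t addition wrap, which is exactly the RFC's "modulo 2^32". A tiny standalone example with made-up times and a made-up ticket_age_add, showing the wrap:

/* Illustrative only: values are hypothetical, not taken from s2n. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t issue_time_ns = 1000000000ULL;                           /* hypothetical issue time */
    uint64_t current_time_ns = issue_time_ns + 5000ULL * 1000000ULL;  /* 5000 ms later */
    uint32_t ticket_age_add = 0xFFFFF000UL;                           /* hypothetical per-ticket value */

    uint64_t age_ms = (current_time_ns - issue_time_ns) / 1000000ULL;
    uint32_t obfuscated = (uint32_t) age_ms + ticket_age_add;         /* wraps mod 2^32 */

    printf("age=%" PRIu64 "ms add=0x%08" PRIx32 " obfuscated=0x%08" PRIx32 "\n",
            age_ms, ticket_age_add, obfuscated);
    return 0;
}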
+
static int s2n_client_psk_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
struct s2n_psk_parameters *psk_params = &conn->psk_params;
struct s2n_array *psk_list = &psk_params->psk_list;
struct s2n_stuffer_reservation identity_list_size;
- GUARD(s2n_stuffer_reserve_uint16(out, &identity_list_size));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &identity_list_size));
uint16_t binder_list_size = SIZE_OF_BINDER_LIST_SIZE;
for (size_t i = 0; i < psk_list->len; i++) {
struct s2n_psk *psk = NULL;
- GUARD_AS_POSIX(s2n_array_get(psk_list, i, (void**) &psk));
- notnull_check(psk);
+ POSIX_GUARD_RESULT(s2n_array_get(psk_list, i, (void**) &psk));
+ POSIX_ENSURE_REF(psk);
/**
*= https://tools.ietf.org/rfc/rfc8446#section-4.1.4
@@ -100,17 +159,23 @@ static int s2n_client_psk_send(struct s2n_connection *conn, struct s2n_stuffer *
}
/* Write the identity */
- GUARD(s2n_stuffer_write_uint16(out, psk->identity.size));
- GUARD(s2n_stuffer_write(out, &psk->identity));
- GUARD(s2n_stuffer_write_uint32(out, 0));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, psk->identity.size));
+ POSIX_GUARD(s2n_stuffer_write(out, &psk->identity));
+
+ /* Write obfuscated ticket age */
+ uint32_t obfuscated_ticket_age = 0;
+ uint64_t current_time = 0;
+ POSIX_GUARD(conn->config->wall_clock(conn->config->sys_clock_ctx, &current_time));
+ POSIX_GUARD_RESULT(s2n_generate_obfuscated_ticket_age(psk, current_time, &obfuscated_ticket_age));
+ POSIX_GUARD(s2n_stuffer_write_uint32(out, obfuscated_ticket_age));
/* Calculate binder size */
uint8_t hash_size = 0;
- GUARD(s2n_hmac_digest_size(psk->hmac_alg, &hash_size));
+ POSIX_GUARD(s2n_hmac_digest_size(psk->hmac_alg, &hash_size));
binder_list_size += hash_size + SIZE_OF_BINDER_SIZE;
}
- GUARD(s2n_stuffer_write_vector_size(&identity_list_size));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&identity_list_size));
/* Calculating the binders requires a complete ClientHello, and at this point
* the extension size, extension list size, and message size are all blank.
@@ -118,231 +183,177 @@ static int s2n_client_psk_send(struct s2n_connection *conn, struct s2n_stuffer *
* We'll write placeholder data to ensure the extension and extension list sizes
* are calculated correctly, then rewrite the binders with real data later. */
psk_params->binder_list_size = binder_list_size;
- GUARD(s2n_stuffer_skip_write(out, binder_list_size));
+ POSIX_GUARD(s2n_stuffer_skip_write(out, binder_list_size));
return S2N_SUCCESS;
}
-/* Match a PSK identity received from the client against the server's known PSK identities.
+/* Find the first of the server's PSK identities that matches the client's identities.
+ * This method compares all server identities to all client identities.
*
- * While both the client's offered identities and whether a match was found are public, we should make an attempt
- * to keep the server's known identities a secret. We will make comparisons to the server's identities constant
+ * While both the client's identities and whether a match was found are public, we should make an attempt
+ * to keep the server's identities a secret. We will make comparisons to the server's identities constant
* time (to hide partial matches) and not end the search early when a match is found (to hide the ordering).
*
* Keeping these comparisons constant time is not high priority. There's no known attack using these timings,
* and an attacker could probably guess the server's known identities just by observing the public identities
* sent by clients.
*/
-static S2N_RESULT s2n_match_psk_identity(struct s2n_array *known_psks, const struct s2n_blob *wire_identity,
- struct s2n_psk **match)
+static S2N_RESULT s2n_select_external_psk(struct s2n_connection *conn, struct s2n_offered_psk_list *client_identity_list)
{
- ENSURE_REF(match);
- ENSURE_REF(wire_identity);
- ENSURE_REF(known_psks);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(client_identity_list);
- *match = NULL;
-
- for (size_t i = 0; i < known_psks->len; i++) {
- struct s2n_psk *psk = NULL;
- GUARD_RESULT(s2n_array_get(known_psks, i, (void**)&psk));
- ENSURE_REF(psk);
-
- ENSURE_REF(psk->identity.data);
- ENSURE_REF(wire_identity->data);
+ struct s2n_array *server_psks = &conn->psk_params.psk_list;
+ conn->psk_params.chosen_psk = NULL;
- uint32_t compare_size = MIN(wire_identity->size, psk->identity.size);
- if (s2n_constant_time_equals(psk->identity.data, wire_identity->data, compare_size)
- & (psk->identity.size == wire_identity->size) & (!*match)) {
- *match = psk;
- }
+ for (size_t i = 0; i < server_psks->len; i++) {
+ struct s2n_psk *server_psk = NULL;
+ RESULT_GUARD(s2n_array_get(server_psks, i, (void**) &server_psk));
+ RESULT_ENSURE_REF(server_psk);
+
+ struct s2n_offered_psk client_psk = { 0 };
+ uint16_t wire_index = 0;
+
+ RESULT_GUARD_POSIX(s2n_offered_psk_list_reread(client_identity_list));
+        while (s2n_offered_psk_list_has_next(client_identity_list)) {
+ RESULT_GUARD_POSIX(s2n_offered_psk_list_next(client_identity_list, &client_psk));
+ uint16_t compare_size = MIN(client_psk.identity.size, server_psk->identity.size);
+ if (s2n_constant_time_equals(client_psk.identity.data, server_psk->identity.data, compare_size)
+ & (client_psk.identity.size == server_psk->identity.size)
+ & (conn->psk_params.chosen_psk == NULL)) {
+ conn->psk_params.chosen_psk = server_psk;
+ conn->psk_params.chosen_psk_wire_index = wire_index;
+ }
+ wire_index++;
+ };
}
+ RESULT_ENSURE_REF(conn->psk_params.chosen_psk);
return S2N_RESULT_OK;
}
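The selection loop above deliberately never breaks out early and folds the equality, size, and already-chosen checks together with bitwise AND, so a partial or late match costs the same amount of work as a mismatch. A standalone sketch of that pattern; constant_time_equals here is a toy stand-in, not s2n_constant_time_equals:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int constant_time_equals(const uint8_t *a, const uint8_t *b, size_t len)
{
    uint8_t diff = 0;
    for (size_t i = 0; i < len; i++) {
        diff |= a[i] ^ b[i];
    }
    return diff == 0;
}

int main(void)
{
    const uint8_t secret[4] = { 1, 2, 3, 4 };
    const uint8_t candidates[3][4] = { { 9, 9, 9, 9 }, { 1, 2, 3, 4 }, { 1, 2, 3, 5 } };

    int chosen = -1;
    for (size_t i = 0; i < 3; i++) {
        /* No early break: later candidates are still compared after a match */
        if (constant_time_equals(candidates[i], secret, 4) & (chosen == -1)) {
            chosen = (int) i;
        }
    }
    printf("chosen index: %d\n", chosen);
    return 0;
}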
-static S2N_RESULT s2n_select_psk_identity(struct s2n_connection *conn, struct s2n_psk_identity *identities, size_t identities_length)
-{
- ENSURE_REF(conn);
- ENSURE_REF(identities);
+static S2N_RESULT s2n_select_resumption_psk(struct s2n_connection *conn, struct s2n_offered_psk_list *client_identity_list) {
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(client_identity_list);
- struct s2n_array *known_psks = &conn->psk_params.psk_list;
+ struct s2n_offered_psk client_psk = { 0 };
conn->psk_params.chosen_psk = NULL;
- for (size_t i = 0; i < identities_length; i++) {
- struct s2n_blob wire_identity = { 0 };
- GUARD_AS_RESULT(s2n_blob_init(&wire_identity, identities[i].data, identities[i].length));
-
- struct s2n_psk *local_match = NULL;
- GUARD_RESULT(s2n_match_psk_identity(known_psks, &wire_identity, &local_match));
-
- /* When a local match is found we do not end this loop early in an attempt
- * to keep the server's known identities a secret and hide its ordering.
- */
- if (local_match != NULL && conn->psk_params.chosen_psk == NULL) {
- conn->psk_params.chosen_psk_wire_index = i;
- conn->psk_params.chosen_psk = local_match;
+ uint8_t rejected_count = 0;
+ while (s2n_offered_psk_list_has_next(client_identity_list) && (rejected_count < MAX_REJECTED_TICKETS)) {
+ RESULT_GUARD_POSIX(s2n_offered_psk_list_next(client_identity_list, &client_psk));
+ /* Select the first resumption PSK that can be decrypted */
+ if (s2n_offered_psk_list_choose_psk(client_identity_list, &client_psk) == S2N_SUCCESS) {
+ return S2N_RESULT_OK;
}
+ rejected_count++;
}
- return S2N_RESULT_OK;
-}
-
-static S2N_RESULT s2n_count_psk_identities(struct s2n_stuffer *input, uint16_t *identity_count)
-{
- ENSURE_REF(input);
- ENSURE_REF(identity_count);
-
- const size_t obfuscated_ticket_age_size = sizeof(uint32_t);
-
- *identity_count = 0;
- while (s2n_stuffer_data_available(input) > 0) {
- uint16_t identity_size = 0;
- GUARD_AS_RESULT(s2n_stuffer_read_uint16(input, &identity_size));
- GUARD_AS_RESULT(s2n_stuffer_skip_read(input, identity_size));
- GUARD_AS_RESULT(s2n_stuffer_skip_read(input, obfuscated_ticket_age_size));
- (*identity_count)++;
- }
- GUARD_AS_RESULT(s2n_stuffer_reread(input));
- return S2N_RESULT_OK;
+ RESULT_BAIL(S2N_ERR_INVALID_SESSION_TICKET);
}
static S2N_RESULT s2n_client_psk_recv_identity_list(struct s2n_connection *conn, struct s2n_stuffer *wire_identities_in)
{
- ENSURE_REF(conn);
- ENSURE_REF(wire_identities_in);
-
- uint16_t identities_count = 0;
- GUARD_RESULT(s2n_count_psk_identities(wire_identities_in, &identities_count));
- ENSURE_GT(identities_count, 0);
- ENSURE_LTE(identities_count, MAX_NUM_OF_PSK_IDENTITIES);
-
- DEFER_CLEANUP(struct s2n_blob wire_identities_blob = { 0 }, s2n_free);
- GUARD_AS_RESULT(s2n_alloc(&wire_identities_blob, identities_count * sizeof(struct s2n_psk_identity)));
- struct s2n_psk_identity *wire_identities = (struct s2n_psk_identity*)(void*) wire_identities_blob.data;
-
- uint16_t wire_index = 0;
- while (s2n_stuffer_data_available(wire_identities_in) > 0) {
- uint16_t identity_size = 0;
- GUARD_AS_RESULT(s2n_stuffer_read_uint16(wire_identities_in, &identity_size));
- ENSURE_GT(identity_size, 0);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->config);
+ RESULT_ENSURE_REF(wire_identities_in);
- uint8_t *identity_data = s2n_stuffer_raw_read(wire_identities_in, identity_size);
- ENSURE_REF(identity_data);
-
- wire_identities[wire_index].data = identity_data;
- wire_identities[wire_index].length = identity_size;
-
- /**
- *= https://tools.ietf.org/rfc/rfc8446#section-4.2.11
- *# For identities established externally, an obfuscated_ticket_age of 0 SHOULD be
- *# used, and servers MUST ignore the value.
- */
- uint32_t obfuscated_ticket_age = 0;
- GUARD_AS_RESULT(s2n_stuffer_read_uint32(wire_identities_in, &obfuscated_ticket_age));
-
- wire_index++;
- }
+ struct s2n_offered_psk_list identity_list = {
+ .conn = conn,
+ .wire_data = *wire_identities_in,
+ };
if (conn->config->psk_selection_cb) {
- GUARD_AS_RESULT(conn->config->psk_selection_cb(conn, wire_identities, identities_count,
- &conn->psk_params.chosen_psk_wire_index));
- struct s2n_blob chosen_wire_identity = { 0 };
- GUARD_AS_RESULT(s2n_blob_init(&chosen_wire_identity,
- wire_identities[conn->psk_params.chosen_psk_wire_index].data,
- wire_identities[conn->psk_params.chosen_psk_wire_index].length));
- GUARD_RESULT(s2n_match_psk_identity(&conn->psk_params.psk_list, &chosen_wire_identity, &conn->psk_params.chosen_psk));
- } else {
- GUARD_RESULT(s2n_select_psk_identity(conn, wire_identities, identities_count));
+ RESULT_GUARD_POSIX(conn->config->psk_selection_cb(conn, conn->config->psk_selection_ctx, &identity_list));
+    } else if (conn->psk_params.type == S2N_PSK_TYPE_EXTERNAL) {
+        RESULT_GUARD(s2n_select_external_psk(conn, &identity_list));
+    } else if (conn->psk_params.type == S2N_PSK_TYPE_RESUMPTION) {
+ RESULT_GUARD(s2n_select_resumption_psk(conn, &identity_list));
}
- ENSURE_LT(conn->psk_params.chosen_psk_wire_index, identities_count);
- ENSURE_REF(conn->psk_params.chosen_psk);
-
+ RESULT_ENSURE_REF(conn->psk_params.chosen_psk);
return S2N_RESULT_OK;
}
static S2N_RESULT s2n_client_psk_recv_binder_list(struct s2n_connection *conn, struct s2n_blob *partial_client_hello,
struct s2n_stuffer *wire_binders_in)
{
- ENSURE_REF(conn);
- ENSURE_REF(wire_binders_in);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(wire_binders_in);
uint16_t wire_index = 0;
while (s2n_stuffer_data_available(wire_binders_in) > 0) {
uint8_t wire_binder_size = 0;
- GUARD_AS_RESULT(s2n_stuffer_read_uint8(wire_binders_in, &wire_binder_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(wire_binders_in, &wire_binder_size));
uint8_t *wire_binder_data;
- ENSURE_REF(wire_binder_data = s2n_stuffer_raw_read(wire_binders_in, wire_binder_size));
+ RESULT_ENSURE_REF(wire_binder_data = s2n_stuffer_raw_read(wire_binders_in, wire_binder_size));
struct s2n_blob wire_binder = { 0 };
- GUARD_AS_RESULT(s2n_blob_init(&wire_binder, wire_binder_data, wire_binder_size));
+ RESULT_GUARD_POSIX(s2n_blob_init(&wire_binder, wire_binder_data, wire_binder_size));
if (wire_index == conn->psk_params.chosen_psk_wire_index) {
- GUARD_AS_RESULT(s2n_psk_verify_binder(conn, conn->psk_params.chosen_psk,
+ RESULT_GUARD_POSIX(s2n_psk_verify_binder(conn, conn->psk_params.chosen_psk,
partial_client_hello, &wire_binder));
return S2N_RESULT_OK;
}
wire_index++;
}
- BAIL(S2N_ERR_BAD_MESSAGE);
+ RESULT_BAIL(S2N_ERR_BAD_MESSAGE);
}
static S2N_RESULT s2n_client_psk_recv_identities(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn);
uint16_t identity_list_size = 0;
- GUARD_AS_RESULT(s2n_stuffer_read_uint16(extension, &identity_list_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint16(extension, &identity_list_size));
uint8_t *identity_list_data;
- ENSURE_REF(identity_list_data = s2n_stuffer_raw_read(extension, identity_list_size));
+ RESULT_ENSURE_REF(identity_list_data = s2n_stuffer_raw_read(extension, identity_list_size));
struct s2n_blob identity_list_blob = { 0 };
- GUARD_AS_RESULT(s2n_blob_init(&identity_list_blob, identity_list_data, identity_list_size));
+ RESULT_GUARD_POSIX(s2n_blob_init(&identity_list_blob, identity_list_data, identity_list_size));
struct s2n_stuffer identity_list = { 0 };
- GUARD_AS_RESULT(s2n_stuffer_init(&identity_list, &identity_list_blob));
- GUARD_AS_RESULT(s2n_stuffer_skip_write(&identity_list, identity_list_blob.size));
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&identity_list, &identity_list_blob));
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_write(&identity_list, identity_list_blob.size));
return s2n_client_psk_recv_identity_list(conn, &identity_list);
}
static S2N_RESULT s2n_client_psk_recv_binders(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn);
uint16_t binder_list_size = 0;
- GUARD_AS_RESULT(s2n_stuffer_read_uint16(extension, &binder_list_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint16(extension, &binder_list_size));
uint8_t *binder_list_data;
- ENSURE_REF(binder_list_data = s2n_stuffer_raw_read(extension, binder_list_size));
+ RESULT_ENSURE_REF(binder_list_data = s2n_stuffer_raw_read(extension, binder_list_size));
struct s2n_blob binder_list_blob = { 0 };
- GUARD_AS_RESULT(s2n_blob_init(&binder_list_blob, binder_list_data, binder_list_size));
+ RESULT_GUARD_POSIX(s2n_blob_init(&binder_list_blob, binder_list_data, binder_list_size));
struct s2n_stuffer binder_list = { 0 };
- GUARD_AS_RESULT(s2n_stuffer_init(&binder_list, &binder_list_blob));
- GUARD_AS_RESULT(s2n_stuffer_skip_write(&binder_list, binder_list_blob.size));
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&binder_list, &binder_list_blob));
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_write(&binder_list, binder_list_blob.size));
/* Record the ClientHello message up to but not including the binder list.
* This is required to calculate the binder for the chosen PSK. */
struct s2n_blob partial_client_hello = { 0 };
const struct s2n_stuffer *client_hello = &conn->handshake.io;
uint32_t binders_size = binder_list_blob.size + SIZE_OF_BINDER_LIST_SIZE;
- ENSURE_GTE(client_hello->write_cursor, binders_size);
+ RESULT_ENSURE_GTE(client_hello->write_cursor, binders_size);
uint16_t partial_client_hello_size = client_hello->write_cursor - binders_size;
- GUARD_AS_RESULT(s2n_blob_slice(&client_hello->blob, &partial_client_hello, 0, partial_client_hello_size));
+ RESULT_GUARD_POSIX(s2n_blob_slice(&client_hello->blob, &partial_client_hello, 0, partial_client_hello_size));
return s2n_client_psk_recv_binder_list(conn, &partial_client_hello, &binder_list);
}
int s2n_client_psk_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
-
- if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
- return S2N_SUCCESS;
- }
+ POSIX_ENSURE_REF(conn);
/**
*= https://tools.ietf.org/rfc/rfc8446#section-4.2.11
@@ -352,11 +363,11 @@ int s2n_client_psk_recv(struct s2n_connection *conn, struct s2n_stuffer *extensi
*# the handshake with an "illegal_parameter" alert.
*/
s2n_extension_type_id psk_ext_id;
- GUARD(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_PRE_SHARED_KEY, &psk_ext_id));
- ne_check(conn->client_hello.extensions.count, 0);
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_PRE_SHARED_KEY, &psk_ext_id));
+ POSIX_ENSURE_NE(conn->client_hello.extensions.count, 0);
uint16_t last_wire_index = conn->client_hello.extensions.count - 1;
uint16_t extension_wire_index = conn->client_hello.extensions.parsed_extensions[psk_ext_id].wire_index;
- ENSURE_POSIX(extension_wire_index == last_wire_index, S2N_ERR_UNSUPPORTED_EXTENSION);
+ POSIX_ENSURE(extension_wire_index == last_wire_index, S2N_ERR_UNSUPPORTED_EXTENSION);
/**
*= https://tools.ietf.org/rfc/rfc8446#section-4.2.9
@@ -367,16 +378,16 @@ int s2n_client_psk_recv(struct s2n_connection *conn, struct s2n_stuffer *extensi
* required to be the last extension sent in the list.
*/
s2n_extension_type_id psk_ke_mode_ext_id;
- GUARD(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_PSK_KEY_EXCHANGE_MODES, &psk_ke_mode_ext_id));
- ENSURE_POSIX(S2N_CBIT_TEST(conn->extension_requests_received, psk_ke_mode_ext_id), S2N_ERR_MISSING_EXTENSION);
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_PSK_KEY_EXCHANGE_MODES, &psk_ke_mode_ext_id));
+ POSIX_ENSURE(S2N_CBIT_TEST(conn->extension_requests_received, psk_ke_mode_ext_id), S2N_ERR_MISSING_EXTENSION);
if (conn->psk_params.psk_ke_mode == S2N_PSK_DHE_KE) {
s2n_extension_type_id key_share_ext_id;
- GUARD(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_KEY_SHARE, &key_share_ext_id));
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_KEY_SHARE, &key_share_ext_id));
/* A key_share extension must have been received in order to use a pre-shared key
* in (EC)DHE key exchange mode.
*/
- ENSURE_POSIX(S2N_CBIT_TEST(conn->extension_requests_received, key_share_ext_id), S2N_ERR_MISSING_EXTENSION);
+ POSIX_ENSURE(S2N_CBIT_TEST(conn->extension_requests_received, key_share_ext_id), S2N_ERR_MISSING_EXTENSION);
} else {
/* s2n currently only supports pre-shared keys in (EC)DHE key exchange mode. If we receive keys with any other
* exchange mode we fall back to a full handshake.
@@ -401,7 +412,7 @@ int s2n_client_psk_recv(struct s2n_connection *conn, struct s2n_stuffer *extensi
*# value is not present or does not validate, the server MUST abort the
*# handshake.
*/
- GUARD_AS_POSIX(s2n_client_psk_recv_binders(conn, extension));
+ POSIX_GUARD_RESULT(s2n_client_psk_recv_binders(conn, extension));
}
/* At this point, we have either chosen a PSK or fallen back to a full handshake. */
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_renegotiation_info.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_renegotiation_info.c
index 3a012f47d1..772712fda4 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_renegotiation_info.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_renegotiation_info.c
@@ -36,7 +36,7 @@ static int s2n_client_renegotiation_recv(struct s2n_connection *conn, struct s2n
{
/* RFC5746 Section 3.2: The renegotiated_connection field is of zero length for the initial handshake. */
uint8_t renegotiated_connection_len;
- GUARD(s2n_stuffer_read_uint8(extension, &renegotiated_connection_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &renegotiated_connection_len));
S2N_ERROR_IF(s2n_stuffer_data_available(extension) || renegotiated_connection_len, S2N_ERR_NON_EMPTY_RENEGOTIATION_INFO);
conn->secure_renegotiation = 1;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_server_name.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_server_name.c
index 904976e4cc..0e69283a2c 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_server_name.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_server_name.c
@@ -39,47 +39,50 @@ const s2n_extension_type s2n_client_server_name_extension = {
static bool s2n_client_server_name_should_send(struct s2n_connection *conn)
{
- return conn && strlen(conn->server_name) > 0;
+ return conn && conn->server_name[0] != '\0';
}
static int s2n_client_server_name_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
struct s2n_stuffer_reservation server_name_list_size = {0};
- GUARD(s2n_stuffer_reserve_uint16(out, &server_name_list_size));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &server_name_list_size));
/* NameType, as described by RFC6066.
* host_name is currently the only possible NameType defined. */
- GUARD(s2n_stuffer_write_uint8(out, S2N_NAME_TYPE_HOST_NAME));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, S2N_NAME_TYPE_HOST_NAME));
- GUARD(s2n_stuffer_write_uint16(out, strlen(conn->server_name)));
- GUARD(s2n_stuffer_write_bytes(out, (const uint8_t *) conn->server_name, strlen(conn->server_name)));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, strlen(conn->server_name)));
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, (const uint8_t *) conn->server_name, strlen(conn->server_name)));
- GUARD(s2n_stuffer_write_vector_size(&server_name_list_size));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&server_name_list_size));
return S2N_SUCCESS;
}
+/* Read the extension up to the first item in ServerNameList. Store the first entry's length in server_name_len.
+ * For now s2n ignores all subsequent items in ServerNameList.
+ */
static int s2n_client_server_name_check(struct s2n_connection *conn, struct s2n_stuffer *extension, uint16_t *server_name_len)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
uint16_t size_of_all;
- GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
- lte_check(size_of_all, s2n_stuffer_data_available(extension));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
+ POSIX_ENSURE_LTE(size_of_all, s2n_stuffer_data_available(extension));
uint8_t server_name_type;
- GUARD(s2n_stuffer_read_uint8(extension, &server_name_type));
- eq_check(server_name_type, S2N_NAME_TYPE_HOST_NAME);
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &server_name_type));
+ POSIX_ENSURE_EQ(server_name_type, S2N_NAME_TYPE_HOST_NAME);
- GUARD(s2n_stuffer_read_uint16(extension, server_name_len));
- lt_check(*server_name_len, sizeof(conn->server_name));
- lte_check(*server_name_len, s2n_stuffer_data_available(extension));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, server_name_len));
+ POSIX_ENSURE_LT(*server_name_len, sizeof(conn->server_name));
+ POSIX_ENSURE_LTE(*server_name_len, s2n_stuffer_data_available(extension));
return S2N_SUCCESS;
}
static int s2n_client_server_name_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
/* Exit early if we've already parsed the server name */
if (conn->server_name[0]) {
@@ -93,8 +96,8 @@ static int s2n_client_server_name_recv(struct s2n_connection *conn, struct s2n_s
}
uint8_t *server_name;
- notnull_check(server_name = s2n_stuffer_raw_read(extension, server_name_len));
- memcpy_check(conn->server_name, server_name, server_name_len);
+ POSIX_ENSURE_REF(server_name = s2n_stuffer_raw_read(extension, server_name_len));
+ POSIX_CHECKED_MEMCPY(conn->server_name, server_name, server_name_len);
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_session_ticket.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_session_ticket.c
index 96ef1b7308..ca9c5e96e7 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_session_ticket.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_session_ticket.c
@@ -17,6 +17,8 @@
#include <stdint.h>
#include "tls/extensions/s2n_client_session_ticket.h"
+
+#include "tls/extensions/s2n_client_psk.h"
#include "tls/s2n_tls.h"
#include "tls/s2n_tls_parameters.h"
#include "tls/s2n_resume.h"
@@ -38,18 +40,18 @@ const s2n_extension_type s2n_client_session_ticket_extension = {
static bool s2n_client_session_ticket_should_send(struct s2n_connection *conn)
{
- return conn->config->use_tickets;
+ return conn->config->use_tickets && !s2n_client_psk_should_send(conn);
}
static int s2n_client_session_ticket_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- GUARD(s2n_stuffer_write(out, &conn->client_ticket));
+ POSIX_GUARD(s2n_stuffer_write(out, &conn->client_ticket));
return S2N_SUCCESS;
}
static int s2n_client_session_ticket_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- if (conn->config->use_tickets != 1) {
+ if (conn->config->use_tickets != 1 || conn->actual_protocol_version > S2N_TLS12) {
/* Ignore the extension. */
return S2N_SUCCESS;
}
@@ -59,9 +61,9 @@ static int s2n_client_session_ticket_recv(struct s2n_connection *conn, struct s2
return S2N_SUCCESS;
}
- if (s2n_stuffer_data_available(extension) == S2N_TICKET_SIZE_IN_BYTES) {
+ if (s2n_stuffer_data_available(extension) == S2N_TLS12_TICKET_SIZE_IN_BYTES) {
conn->session_ticket_status = S2N_DECRYPT_TICKET;
- GUARD(s2n_stuffer_copy(extension, &conn->client_ticket_to_decrypt, S2N_TICKET_SIZE_IN_BYTES));
+ POSIX_GUARD(s2n_stuffer_copy(extension, &conn->client_ticket_to_decrypt, S2N_TLS12_TICKET_SIZE_IN_BYTES));
} else if (s2n_config_is_encrypt_decrypt_key_available(conn->config) == 1) {
conn->session_ticket_status = S2N_NEW_TICKET;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_status_request.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_status_request.c
index e5144fba8b..608e8d0a55 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_status_request.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_status_request.c
@@ -42,20 +42,20 @@ static bool s2n_client_status_request_should_send(struct s2n_connection *conn)
static int s2n_client_status_request_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- GUARD(s2n_stuffer_write_uint8(out, (uint8_t) conn->config->status_request_type));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, (uint8_t) conn->config->status_request_type));
/* responder_id_list
*
* From https://tools.ietf.org/html/rfc6066#section-8:
* A zero-length "responder_id_list" sequence has the special meaning that the responders are implicitly
* known to the server, e.g., by prior arrangement */
- GUARD(s2n_stuffer_write_uint16(out, 0));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, 0));
/* request_extensions
*
* From https://tools.ietf.org/html/rfc6066#section-8:
* A zero-length "request_extensions" value means that there are no extensions. */
- GUARD(s2n_stuffer_write_uint16(out, 0));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, 0));
return S2N_SUCCESS;
}
@@ -68,7 +68,7 @@ static int s2n_client_status_request_recv(struct s2n_connection *conn, struct s2
}
uint8_t type;
- GUARD(s2n_stuffer_read_uint8(extension, &type));
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &type));
if (type != (uint8_t) S2N_STATUS_REQUEST_OCSP) {
/* We only support OCSP (type 1), ignore the extension */
return S2N_SUCCESS;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_groups.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_groups.c
index 4cb2f9e3c5..1b8ead056d 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_groups.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_groups.c
@@ -48,33 +48,33 @@ bool s2n_extension_should_send_if_ecc_enabled(struct s2n_connection *conn)
static int s2n_client_supported_groups_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
/* Group list len */
struct s2n_stuffer_reservation group_list_len = { 0 };
- GUARD(s2n_stuffer_reserve_uint16(out, &group_list_len));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &group_list_len));
/* Send KEM groups list first */
if (s2n_connection_get_protocol_version(conn) >= S2N_TLS13 && s2n_pq_is_enabled()) {
for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
- GUARD(s2n_stuffer_write_uint16(out, kem_pref->tls13_kem_groups[i]->iana_id));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, kem_pref->tls13_kem_groups[i]->iana_id));
}
}
/* Then send curve list */
for (size_t i = 0; i < ecc_pref->count; i++) {
- GUARD(s2n_stuffer_write_uint16(out, ecc_pref->ecc_curves[i]->iana_id));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, ecc_pref->ecc_curves[i]->iana_id));
}
- GUARD(s2n_stuffer_write_vector_size(&group_list_len));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&group_list_len));
return S2N_SUCCESS;
}
@@ -83,16 +83,16 @@ static int s2n_client_supported_groups_send(struct s2n_connection *conn, struct
* mutually_supported_kem_groups array based on the received IANA ID. Will
* ignore unrecognized IANA IDs (and return success). */
static int s2n_client_supported_groups_recv_iana_id(struct s2n_connection *conn, uint16_t iana_id) {
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
for (size_t i = 0; i < ecc_pref->count; i++) {
const struct s2n_ecc_named_curve *supported_curve = ecc_pref->ecc_curves[i];
if (iana_id == supported_curve->iana_id) {
- conn->secure.mutually_supported_curves[i] = supported_curve;
+ conn->kex_params.mutually_supported_curves[i] = supported_curve;
return S2N_SUCCESS;
}
}
@@ -103,13 +103,13 @@ static int s2n_client_supported_groups_recv_iana_id(struct s2n_connection *conn,
}
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
const struct s2n_kem_group *supported_kem_group = kem_pref->tls13_kem_groups[i];
if (iana_id == supported_kem_group->iana_id) {
- conn->secure.mutually_supported_kem_groups[i] = supported_kem_group;
+ conn->kex_params.mutually_supported_kem_groups[i] = supported_kem_group;
return S2N_SUCCESS;
}
}
@@ -118,40 +118,40 @@ static int s2n_client_supported_groups_recv_iana_id(struct s2n_connection *conn,
}
static int s2n_choose_supported_group(struct s2n_connection *conn) {
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
/* Ensure that only the intended group will be non-NULL (if no group is chosen, everything
* should be NULL). */
- conn->secure.server_kem_group_params.kem_group = NULL;
- conn->secure.server_kem_group_params.ecc_params.negotiated_curve = NULL;
- conn->secure.server_kem_group_params.kem_params.kem = NULL;
- conn->secure.server_ecc_evp_params.negotiated_curve = NULL;
+ conn->kex_params.server_kem_group_params.kem_group = NULL;
+ conn->kex_params.server_kem_group_params.ecc_params.negotiated_curve = NULL;
+ conn->kex_params.server_kem_group_params.kem_params.kem = NULL;
+ conn->kex_params.server_ecc_evp_params.negotiated_curve = NULL;
/* Prefer to negotiate hybrid PQ over ECC. If PQ is disabled, we will never choose a
* PQ group because the mutually_supported_kem_groups array will not have been
* populated with anything. */
for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
- const struct s2n_kem_group *candidate_kem_group = conn->secure.mutually_supported_kem_groups[i];
+ const struct s2n_kem_group *candidate_kem_group = conn->kex_params.mutually_supported_kem_groups[i];
if (candidate_kem_group != NULL) {
- conn->secure.server_kem_group_params.kem_group = candidate_kem_group;
- conn->secure.server_kem_group_params.ecc_params.negotiated_curve = candidate_kem_group->curve;
- conn->secure.server_kem_group_params.kem_params.kem = candidate_kem_group->kem;
+ conn->kex_params.server_kem_group_params.kem_group = candidate_kem_group;
+ conn->kex_params.server_kem_group_params.ecc_params.negotiated_curve = candidate_kem_group->curve;
+ conn->kex_params.server_kem_group_params.kem_params.kem = candidate_kem_group->kem;
return S2N_SUCCESS;
}
}
for (size_t i = 0; i < ecc_pref->count; i++) {
- const struct s2n_ecc_named_curve *candidate_curve = conn->secure.mutually_supported_curves[i];
+ const struct s2n_ecc_named_curve *candidate_curve = conn->kex_params.mutually_supported_curves[i];
if (candidate_curve != NULL) {
- conn->secure.server_ecc_evp_params.negotiated_curve = candidate_curve;
+ conn->kex_params.server_ecc_evp_params.negotiated_curve = candidate_curve;
return S2N_SUCCESS;
}
}
@@ -160,11 +160,11 @@ static int s2n_choose_supported_group(struct s2n_connection *conn) {
}
static int s2n_client_supported_groups_recv(struct s2n_connection *conn, struct s2n_stuffer *extension) {
- notnull_check(conn);
- notnull_check(extension);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(extension);
uint16_t size_of_all;
- GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
if (size_of_all > s2n_stuffer_data_available(extension) || (size_of_all % sizeof(uint16_t))) {
/* Malformed length, ignore the extension */
return S2N_SUCCESS;
@@ -172,11 +172,11 @@ static int s2n_client_supported_groups_recv(struct s2n_connection *conn, struct
for (size_t i = 0; i < (size_of_all / sizeof(uint16_t)); i++) {
uint16_t iana_id;
- GUARD(s2n_stuffer_read_uint16(extension, &iana_id));
- GUARD(s2n_client_supported_groups_recv_iana_id(conn, iana_id));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &iana_id));
+ POSIX_GUARD(s2n_client_supported_groups_recv_iana_id(conn, iana_id));
}
- GUARD(s2n_choose_supported_group(conn));
+ POSIX_GUARD(s2n_choose_supported_group(conn));
return S2N_SUCCESS;
}
@@ -185,12 +185,12 @@ static int s2n_client_supported_groups_recv(struct s2n_connection *conn, struct
int s2n_extensions_client_supported_groups_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- GUARD(s2n_extension_send(&s2n_client_supported_groups_extension, conn, out));
+ POSIX_GUARD(s2n_extension_send(&s2n_client_supported_groups_extension, conn, out));
/* The original send method also sent ec point formats. To avoid breaking
* anything, I'm going to let it continue writing point formats.
*/
- GUARD(s2n_extension_send(&s2n_client_ec_point_format_extension, conn, out));
+ POSIX_GUARD(s2n_extension_send(&s2n_client_ec_point_format_extension, conn, out));
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_versions.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_versions.c
index d0ec8cb329..bbc37e475a 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_versions.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_client_supported_versions.c
@@ -59,15 +59,16 @@ const s2n_extension_type s2n_client_supported_versions_extension = {
static int s2n_client_supported_versions_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
uint8_t highest_supported_version = conn->client_protocol_version;
- uint8_t minimum_supported_version;
- GUARD(s2n_connection_get_minimum_supported_version(conn, &minimum_supported_version));
+ uint8_t minimum_supported_version = s2n_unknown_protocol_version;
+ POSIX_GUARD_RESULT(s2n_connection_get_minimum_supported_version(conn, &minimum_supported_version));
+ POSIX_ENSURE(highest_supported_version >= minimum_supported_version, S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
uint8_t version_list_length = highest_supported_version - minimum_supported_version + 1;
- GUARD(s2n_stuffer_write_uint8(out, version_list_length * S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, version_list_length * S2N_TLS_PROTOCOL_VERSION_LEN));
for (uint8_t i = highest_supported_version; i >= minimum_supported_version; i--) {
- GUARD(s2n_stuffer_write_uint8(out, i / 10));
- GUARD(s2n_stuffer_write_uint8(out, i % 10));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, i / 10));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, i % 10));
}
return S2N_SUCCESS;
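
A worked example of the wire encoding above may help: s2n stores protocol versions as two-digit integers (S2N_TLS13 is 34, S2N_TLS10 is 31), so i / 10 and i % 10 produce the major and minor bytes of each ProtocolVersion. The standalone sketch below reproduces the arithmetic with plain integers and is illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t highest = 34; /* S2N_TLS13 */
        uint8_t minimum = 31; /* S2N_TLS10 */

        /* List length byte written before the loop: 4 versions * 2 bytes each = 8. */
        printf("length: %d\n", (highest - minimum + 1) * 2);

        for (uint8_t i = highest; i >= minimum; i--) {
            /* 34 -> 03 04 (TLS 1.3), 33 -> 03 03, 32 -> 03 02, 31 -> 03 01 */
            printf("%02x %02x\n", (unsigned) (i / 10), (unsigned) (i % 10));
        }
        return 0;
    }
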
@@ -75,11 +76,11 @@ static int s2n_client_supported_versions_send(struct s2n_connection *conn, struc
static int s2n_extensions_client_supported_versions_process(struct s2n_connection *conn, struct s2n_stuffer *extension) {
uint8_t highest_supported_version = conn->server_protocol_version;
- uint8_t minimum_supported_version;
- GUARD(s2n_connection_get_minimum_supported_version(conn, &minimum_supported_version));
+ uint8_t minimum_supported_version = s2n_unknown_protocol_version;
+ POSIX_GUARD_RESULT(s2n_connection_get_minimum_supported_version(conn, &minimum_supported_version));
uint8_t size_of_version_list;
- GUARD(s2n_stuffer_read_uint8(extension, &size_of_version_list));
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &size_of_version_list));
S2N_ERROR_IF(size_of_version_list != s2n_stuffer_data_available(extension), S2N_ERR_BAD_MESSAGE);
S2N_ERROR_IF(size_of_version_list % S2N_TLS_PROTOCOL_VERSION_LEN != 0, S2N_ERR_BAD_MESSAGE);
@@ -88,7 +89,7 @@ static int s2n_extensions_client_supported_versions_process(struct s2n_connectio
for (int i = 0; i < size_of_version_list; i += S2N_TLS_PROTOCOL_VERSION_LEN) {
uint8_t client_version_parts[S2N_TLS_PROTOCOL_VERSION_LEN];
- GUARD(s2n_stuffer_read_bytes(extension, client_version_parts, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(extension, client_version_parts, S2N_TLS_PROTOCOL_VERSION_LEN));
/* If the client version is outside of our supported versions, then ignore the value.
* S2N does not support SSLv2 except for upgrading connections. Since this extension is
@@ -127,7 +128,7 @@ static int s2n_client_supported_versions_recv(struct s2n_connection *conn, struc
if (s2n_extensions_client_supported_versions_process(conn, in) < 0) {
s2n_queue_reader_unsupported_protocol_version_alert(conn);
- S2N_ERROR(S2N_ERR_BAD_MESSAGE);
+ POSIX_BAIL(S2N_ERR_BAD_MESSAGE);
}
return S2N_SUCCESS;
}
@@ -135,8 +136,8 @@ static int s2n_client_supported_versions_recv(struct s2n_connection *conn, struc
/* Old-style extension functions -- remove after extensions refactor is complete */
int s2n_extensions_client_supported_versions_size(struct s2n_connection *conn) {
- uint8_t minimum_supported_version;
- GUARD(s2n_connection_get_minimum_supported_version(conn, &minimum_supported_version));
+ uint8_t minimum_supported_version = s2n_unknown_protocol_version;
+ POSIX_GUARD_RESULT(s2n_connection_get_minimum_supported_version(conn, &minimum_supported_version));
uint8_t highest_supported_version = conn->client_protocol_version;
uint8_t version_list_length = highest_supported_version - minimum_supported_version + 1;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_cookie.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_cookie.c
index 7e8885bc0b..18940d7191 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_cookie.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_cookie.c
@@ -22,6 +22,7 @@
const s2n_extension_type s2n_client_cookie_extension = {
.iana_value = TLS_EXTENSION_COOKIE,
+ .minimum_version = S2N_TLS13,
.is_response = true,
.send = s2n_extension_send_noop,
.recv = s2n_extension_recv_noop,
@@ -35,6 +36,7 @@ static int s2n_cookie_recv(struct s2n_connection *conn, struct s2n_stuffer *exte
const s2n_extension_type s2n_server_cookie_extension = {
.iana_value = TLS_EXTENSION_COOKIE,
+ .minimum_version = S2N_TLS13,
.is_response = false,
.send = s2n_cookie_send,
.recv = s2n_cookie_recv,
@@ -44,33 +46,29 @@ const s2n_extension_type s2n_server_cookie_extension = {
static bool s2n_cookie_should_send(struct s2n_connection *conn)
{
- return s2n_extension_send_if_tls13_connection(conn)
- && conn && s2n_stuffer_data_available(&conn->cookie_stuffer) > 0;
+ return conn && s2n_stuffer_data_available(&conn->cookie_stuffer) > 0;
}
static int s2n_cookie_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
uint16_t cookie_size = s2n_stuffer_data_available(&conn->cookie_stuffer);
- GUARD(s2n_stuffer_write_uint16(out, cookie_size));
- GUARD(s2n_stuffer_copy(&conn->cookie_stuffer, out, cookie_size));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, cookie_size));
+ POSIX_GUARD(s2n_stuffer_copy(&conn->cookie_stuffer, out, cookie_size));
return S2N_SUCCESS;
}
static int s2n_cookie_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
- if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
- return S2N_SUCCESS;
- }
+ POSIX_ENSURE_REF(conn);
uint16_t cookie_len;
- GUARD(s2n_stuffer_read_uint16(extension, &cookie_len));
- ENSURE_POSIX(s2n_stuffer_data_available(extension) == cookie_len, S2N_ERR_BAD_MESSAGE);
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &cookie_len));
+ POSIX_ENSURE(s2n_stuffer_data_available(extension) == cookie_len, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_stuffer_wipe(&conn->cookie_stuffer));
- GUARD(s2n_stuffer_resize(&conn->cookie_stuffer, cookie_len));
- GUARD(s2n_stuffer_copy(extension, &conn->cookie_stuffer, cookie_len));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->cookie_stuffer));
+ POSIX_GUARD(s2n_stuffer_resize(&conn->cookie_stuffer, cookie_len));
+ POSIX_GUARD(s2n_stuffer_copy(extension, &conn->cookie_stuffer, cookie_len));
return S2N_SUCCESS;
}
@@ -78,7 +76,7 @@ static int s2n_cookie_recv(struct s2n_connection *conn, struct s2n_stuffer *exte
int s2n_extensions_cookie_size(struct s2n_connection *conn)
{
- GUARD(s2n_stuffer_reread(&conn->cookie_stuffer));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->cookie_stuffer));
if (s2n_stuffer_data_available(&conn->cookie_stuffer) == 0) {
return 0;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_early_data_indication.h b/contrib/restricted/aws/s2n/tls/extensions/s2n_early_data_indication.h
new file mode 100644
index 0000000000..8e78c11cba
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_early_data_indication.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "tls/extensions/s2n_extension_type.h"
+
+extern const s2n_extension_type s2n_client_early_data_indication_extension;
+extern const s2n_extension_type s2n_server_early_data_indication_extension;
+extern const s2n_extension_type s2n_nst_early_data_indication_extension;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_ec_point_format.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_ec_point_format.c
index cb720581a6..00d0a240f9 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_ec_point_format.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_ec_point_format.c
@@ -54,10 +54,10 @@ static bool s2n_server_ec_point_format_should_send(struct s2n_connection *conn)
static int s2n_ec_point_format_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
/* Point format list len. We only support one. */
- GUARD(s2n_stuffer_write_uint8(out, 1));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, 1));
/* Only allow uncompressed format */
- GUARD(s2n_stuffer_write_uint8(out, TLS_EC_POINT_FORMAT_UNCOMPRESSED));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, TLS_EC_POINT_FORMAT_UNCOMPRESSED));
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/utils/s2n_str.h b/contrib/restricted/aws/s2n/tls/extensions/s2n_ems.h
index a0ca67c3ba..7478332c34 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_str.h
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_ems.h
@@ -12,6 +12,10 @@
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
+
#pragma once
-extern char *s2n_strcpy(char *buf, char *last, const char *str);
+#include "tls/extensions/s2n_extension_type.h"
+
+extern const s2n_extension_type s2n_client_ems_extension;
+extern const s2n_extension_type s2n_server_ems_extension;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.c
index 5188535bf4..25000e56a9 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.c
@@ -17,7 +17,7 @@
#include "s2n_extension_type.h"
#include "s2n_extension_type_lists.h"
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
#include "utils/s2n_safety.h"
@@ -29,46 +29,46 @@ static const s2n_parsed_extension empty_parsed_extensions[S2N_PARSED_EXTENSIONS_
int s2n_extension_list_send(s2n_extension_list_id list_type, struct s2n_connection *conn, struct s2n_stuffer *out)
{
s2n_extension_type_list *extension_type_list;
- GUARD(s2n_extension_type_list_get(list_type, &extension_type_list));
+ POSIX_GUARD(s2n_extension_type_list_get(list_type, &extension_type_list));
struct s2n_stuffer_reservation total_extensions_size = {0};
- GUARD(s2n_stuffer_reserve_uint16(out, &total_extensions_size));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &total_extensions_size));
for (int i = 0; i < extension_type_list->count; i++) {
- GUARD(s2n_extension_send(extension_type_list->extension_types[i], conn, out));
+ POSIX_GUARD(s2n_extension_send(extension_type_list->extension_types[i], conn, out));
}
- GUARD(s2n_stuffer_write_vector_size(&total_extensions_size));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&total_extensions_size));
return S2N_SUCCESS;
}
int s2n_extension_list_recv(s2n_extension_list_id list_type, struct s2n_connection *conn, struct s2n_stuffer *in)
{
s2n_parsed_extensions_list parsed_extension_list = { 0 };
- GUARD(s2n_extension_list_parse(in, &parsed_extension_list));
- GUARD(s2n_extension_list_process(list_type, conn, &parsed_extension_list));
+ POSIX_GUARD(s2n_extension_list_parse(in, &parsed_extension_list));
+ POSIX_GUARD(s2n_extension_list_process(list_type, conn, &parsed_extension_list));
return S2N_SUCCESS;
}
static int s2n_extension_process_impl(const s2n_extension_type *extension_type, s2n_extension_type_id extension_id,
struct s2n_connection *conn, s2n_parsed_extension *parsed_extensions)
{
- notnull_check(extension_type);
- notnull_check(parsed_extensions);
+ POSIX_ENSURE_REF(extension_type);
+ POSIX_ENSURE_REF(parsed_extensions);
if (s2n_parsed_extension_is_empty(&parsed_extensions[extension_id])) {
- GUARD(s2n_extension_is_missing(extension_type, conn));
+ POSIX_GUARD(s2n_extension_is_missing(extension_type, conn));
return S2N_SUCCESS;
}
- ENSURE_POSIX(parsed_extensions[extension_id].extension_type == extension_type->iana_value,
+ POSIX_ENSURE(parsed_extensions[extension_id].extension_type == extension_type->iana_value,
S2N_ERR_INVALID_PARSED_EXTENSIONS);
struct s2n_stuffer extension_stuffer;
- GUARD(s2n_stuffer_init(&extension_stuffer, &parsed_extensions[extension_id].extension));
- GUARD(s2n_stuffer_skip_write(&extension_stuffer, parsed_extensions[extension_id].extension.size));
+ POSIX_GUARD(s2n_stuffer_init(&extension_stuffer, &parsed_extensions[extension_id].extension));
+ POSIX_GUARD(s2n_stuffer_skip_write(&extension_stuffer, parsed_extensions[extension_id].extension.size));
- GUARD(s2n_extension_recv(extension_type, conn, &extension_stuffer));
+ POSIX_GUARD(s2n_extension_recv(extension_type, conn, &extension_stuffer));
return S2N_SUCCESS;
}
@@ -76,11 +76,11 @@ static int s2n_extension_process_impl(const s2n_extension_type *extension_type,
int s2n_extension_process(const s2n_extension_type *extension_type, struct s2n_connection *conn,
s2n_parsed_extensions_list *parsed_extension_list)
{
- notnull_check(parsed_extension_list);
- notnull_check(extension_type);
+ POSIX_ENSURE_REF(parsed_extension_list);
+ POSIX_ENSURE_REF(extension_type);
s2n_extension_type_id extension_id;
- GUARD(s2n_extension_supported_iana_value_to_id(extension_type->iana_value, &extension_id));
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(extension_type->iana_value, &extension_id));
int result = s2n_extension_process_impl(extension_type, extension_id, conn, parsed_extension_list->parsed_extensions);
@@ -94,13 +94,13 @@ int s2n_extension_process(const s2n_extension_type *extension_type, struct s2n_c
int s2n_extension_list_process(s2n_extension_list_id list_type, struct s2n_connection *conn,
s2n_parsed_extensions_list *parsed_extension_list)
{
- notnull_check(parsed_extension_list);
+ POSIX_ENSURE_REF(parsed_extension_list);
s2n_extension_type_list *extension_type_list;
- GUARD(s2n_extension_type_list_get(list_type, &extension_type_list));
+ POSIX_GUARD(s2n_extension_type_list_get(list_type, &extension_type_list));
for (int i = 0; i < extension_type_list->count; i++) {
- GUARD(s2n_extension_process(extension_type_list->extension_types[i],
+ POSIX_GUARD(s2n_extension_process(extension_type_list->extension_types[i],
conn, parsed_extension_list));
}
@@ -122,19 +122,19 @@ int s2n_extension_list_process(s2n_extension_list_id list_type, struct s2n_conne
static int s2n_extension_parse(struct s2n_stuffer *in, s2n_parsed_extension *parsed_extensions, uint16_t *wire_index)
{
- notnull_check(parsed_extensions);
- notnull_check(wire_index);
+ POSIX_ENSURE_REF(parsed_extensions);
+ POSIX_ENSURE_REF(wire_index);
uint16_t extension_type;
- ENSURE_POSIX(s2n_stuffer_read_uint16(in, &extension_type) == S2N_SUCCESS,
+ POSIX_ENSURE(s2n_stuffer_read_uint16(in, &extension_type) == S2N_SUCCESS,
S2N_ERR_BAD_MESSAGE);
uint16_t extension_size;
- ENSURE_POSIX(s2n_stuffer_read_uint16(in, &extension_size) == S2N_SUCCESS,
+ POSIX_ENSURE(s2n_stuffer_read_uint16(in, &extension_size) == S2N_SUCCESS,
S2N_ERR_BAD_MESSAGE);
uint8_t *extension_data = s2n_stuffer_raw_read(in, extension_size);
- ENSURE_POSIX(extension_data != NULL, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(extension_data != NULL, S2N_ERR_BAD_MESSAGE);
s2n_extension_type_id extension_id;
if (s2n_extension_supported_iana_value_to_id(extension_type, &extension_id) != S2N_SUCCESS) {
@@ -145,13 +145,13 @@ static int s2n_extension_parse(struct s2n_stuffer *in, s2n_parsed_extension *par
s2n_parsed_extension *parsed_extension = &parsed_extensions[extension_id];
/* Error if extension is a duplicate */
- ENSURE_POSIX(s2n_parsed_extension_is_empty(parsed_extension),
+ POSIX_ENSURE(s2n_parsed_extension_is_empty(parsed_extension),
S2N_ERR_DUPLICATE_EXTENSION);
/* Fill in parsed extension */
parsed_extension->extension_type = extension_type;
parsed_extension->wire_index = *wire_index;
- GUARD(s2n_blob_init(&parsed_extension->extension, extension_data, extension_size));
+ POSIX_GUARD(s2n_blob_init(&parsed_extension->extension, extension_data, extension_size));
(*wire_index)++;
return S2N_SUCCESS;
@@ -159,10 +159,10 @@ static int s2n_extension_parse(struct s2n_stuffer *in, s2n_parsed_extension *par
int s2n_extension_list_parse(struct s2n_stuffer *in, s2n_parsed_extensions_list *parsed_extension_list)
{
- notnull_check(in);
- notnull_check(parsed_extension_list);
+ POSIX_ENSURE_REF(in);
+ POSIX_ENSURE_REF(parsed_extension_list);
- memset_check((s2n_parsed_extension*) parsed_extension_list->parsed_extensions,
+ POSIX_CHECKED_MEMSET((s2n_parsed_extension*) parsed_extension_list->parsed_extensions,
0, sizeof(parsed_extension_list->parsed_extensions));
uint16_t total_extensions_size;
@@ -171,17 +171,17 @@ int s2n_extension_list_parse(struct s2n_stuffer *in, s2n_parsed_extensions_list
}
uint8_t *extensions_data = s2n_stuffer_raw_read(in, total_extensions_size);
- ENSURE_POSIX(extensions_data != NULL, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(extensions_data != NULL, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_blob_init(&parsed_extension_list->raw, extensions_data, total_extensions_size));
+ POSIX_GUARD(s2n_blob_init(&parsed_extension_list->raw, extensions_data, total_extensions_size));
struct s2n_stuffer extensions_stuffer;
- GUARD(s2n_stuffer_init(&extensions_stuffer, &parsed_extension_list->raw));
- GUARD(s2n_stuffer_skip_write(&extensions_stuffer, total_extensions_size));
+ POSIX_GUARD(s2n_stuffer_init(&extensions_stuffer, &parsed_extension_list->raw));
+ POSIX_GUARD(s2n_stuffer_skip_write(&extensions_stuffer, total_extensions_size));
uint16_t wire_index = 0;
while (s2n_stuffer_data_available(&extensions_stuffer)) {
- GUARD(s2n_extension_parse(&extensions_stuffer, parsed_extension_list->parsed_extensions, &wire_index));
+ POSIX_GUARD(s2n_extension_parse(&extensions_stuffer, parsed_extension_list->parsed_extensions, &wire_index));
}
parsed_extension_list->count = wire_index;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.h b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.h
index dcf7c9da9b..a26eb3a89c 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.h
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_list.h
@@ -34,11 +34,13 @@ typedef struct {
typedef enum {
S2N_EXTENSION_LIST_CLIENT_HELLO = 0,
+ S2N_EXTENSION_LIST_HELLO_RETRY_REQUEST,
S2N_EXTENSION_LIST_SERVER_HELLO_DEFAULT,
S2N_EXTENSION_LIST_SERVER_HELLO_TLS13,
S2N_EXTENSION_LIST_ENCRYPTED_EXTENSIONS,
S2N_EXTENSION_LIST_CERT_REQ,
S2N_EXTENSION_LIST_CERTIFICATE,
+ S2N_EXTENSION_LIST_NST,
S2N_EXTENSION_LIST_EMPTY,
S2N_EXTENSION_LIST_IDS_COUNT,
} s2n_extension_list_id;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.c
index 040c57a16c..1c808f1d8c 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
#include "tls/extensions/s2n_extension_type.h"
@@ -72,7 +72,7 @@ s2n_extension_type_id s2n_extension_iana_value_to_id(const uint16_t iana_value)
int s2n_extension_supported_iana_value_to_id(const uint16_t iana_value, s2n_extension_type_id *internal_id)
{
- notnull_check(internal_id);
+ POSIX_ENSURE_REF(internal_id);
*internal_id = s2n_extension_iana_value_to_id(iana_value);
S2N_ERROR_IF(*internal_id == s2n_unsupported_extension, S2N_ERR_UNRECOGNIZED_EXTENSION);
@@ -81,13 +81,13 @@ int s2n_extension_supported_iana_value_to_id(const uint16_t iana_value, s2n_exte
int s2n_extension_send(const s2n_extension_type *extension_type, struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(extension_type);
- notnull_check(extension_type->should_send);
- notnull_check(extension_type->send);
- notnull_check(conn);
+ POSIX_ENSURE_REF(extension_type);
+ POSIX_ENSURE_REF(extension_type->should_send);
+ POSIX_ENSURE_REF(extension_type->send);
+ POSIX_ENSURE_REF(conn);
s2n_extension_type_id extension_id;
- GUARD(s2n_extension_supported_iana_value_to_id(extension_type->iana_value, &extension_id));
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(extension_type->iana_value, &extension_id));
/* Do not send response if request not received. */
if (extension_type->is_response &&
@@ -95,23 +95,28 @@ int s2n_extension_send(const s2n_extension_type *extension_type, struct s2n_conn
return S2N_SUCCESS;
}
+ /* Do not send an extension that is not valid for the protocol version */
+ if (extension_type->minimum_version > conn->actual_protocol_version) {
+ return S2N_SUCCESS;
+ }
+
/* Check if we need to send. Some extensions are only sent if specific conditions are met. */
if (!extension_type->should_send(conn)) {
return S2N_SUCCESS;
}
/* Write extension type */
- GUARD(s2n_stuffer_write_uint16(out, extension_type->iana_value));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, extension_type->iana_value));
/* Reserve space for extension size */
struct s2n_stuffer_reservation extension_size_bytes = {0};
- GUARD(s2n_stuffer_reserve_uint16(out, &extension_size_bytes));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &extension_size_bytes));
/* Write extension data */
- GUARD(extension_type->send(conn, out));
+ POSIX_GUARD(extension_type->send(conn, out));
/* Record extension size */
- GUARD(s2n_stuffer_write_vector_size(&extension_size_bytes));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&extension_size_bytes));
/* Set request bit flag */
if (!extension_type->is_response) {
@@ -123,20 +128,37 @@ int s2n_extension_send(const s2n_extension_type *extension_type, struct s2n_conn
int s2n_extension_recv(const s2n_extension_type *extension_type, struct s2n_connection *conn, struct s2n_stuffer *in)
{
- notnull_check(extension_type);
- notnull_check(extension_type->recv);
- notnull_check(conn);
+ POSIX_ENSURE_REF(extension_type);
+ POSIX_ENSURE_REF(extension_type->recv);
+ POSIX_ENSURE_REF(conn);
s2n_extension_type_id extension_id;
- GUARD(s2n_extension_supported_iana_value_to_id(extension_type->iana_value, &extension_id));
-
- /* Do not accept a response if we did not send a request */
- if(extension_type->is_response &&
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(extension_type->iana_value, &extension_id));
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2
+ *# Implementations MUST NOT send extension responses if the remote
+ *# endpoint did not send the corresponding extension requests, with the
+ *# exception of the "cookie" extension in the HelloRetryRequest. Upon
+ *# receiving such an extension, an endpoint MUST abort the handshake
+ *# with an "unsupported_extension" alert.
+ *
+ *= https://tools.ietf.org/rfc/rfc7627#section-5.3
+ *# If the original session did not use the "extended_master_secret"
+ *# extension but the new ServerHello contains the extension, the
+ *# client MUST abort the handshake.
+ **/
+ if (extension_type->is_response &&
!S2N_CBIT_TEST(conn->extension_requests_sent, extension_id)) {
- S2N_ERROR(S2N_ERR_UNSUPPORTED_EXTENSION);
+ POSIX_BAIL(S2N_ERR_UNSUPPORTED_EXTENSION);
}
- GUARD(extension_type->recv(conn, in));
+ /* Do not process an extension not valid for the protocol version */
+ if (extension_type->minimum_version > conn->actual_protocol_version) {
+ return S2N_SUCCESS;
+ }
+
+ POSIX_GUARD(extension_type->recv(conn, in));
/* Set request bit flag */
if (!extension_type->is_response) {
@@ -148,12 +170,12 @@ int s2n_extension_recv(const s2n_extension_type *extension_type, struct s2n_conn
int s2n_extension_is_missing(const s2n_extension_type *extension_type, struct s2n_connection *conn)
{
- notnull_check(extension_type);
- notnull_check(extension_type->if_missing);
- notnull_check(conn);
+ POSIX_ENSURE_REF(extension_type);
+ POSIX_ENSURE_REF(extension_type->if_missing);
+ POSIX_ENSURE_REF(conn);
s2n_extension_type_id extension_id;
- GUARD(s2n_extension_supported_iana_value_to_id(extension_type->iana_value, &extension_id));
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(extension_type->iana_value, &extension_id));
/* Do not consider an extension missing if we did not send a request */
if(extension_type->is_response &&
@@ -161,19 +183,24 @@ int s2n_extension_is_missing(const s2n_extension_type *extension_type, struct s2
return S2N_SUCCESS;
}
- GUARD(extension_type->if_missing(conn));
+ /* Do not consider an extension missing if it is not valid for the protocol version */
+ if (extension_type->minimum_version > conn->actual_protocol_version) {
+ return S2N_SUCCESS;
+ }
+
+ POSIX_GUARD(extension_type->if_missing(conn));
return S2N_SUCCESS;
}
int s2n_extension_send_unimplemented(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- S2N_ERROR(S2N_ERR_UNIMPLEMENTED);
+ POSIX_BAIL(S2N_ERR_UNIMPLEMENTED);
}
int s2n_extension_recv_unimplemented(struct s2n_connection *conn, struct s2n_stuffer *in)
{
- S2N_ERROR(S2N_ERR_UNIMPLEMENTED);
+ POSIX_BAIL(S2N_ERR_UNIMPLEMENTED);
}
int s2n_extension_send_noop(struct s2n_connection *conn, struct s2n_stuffer *out)
@@ -203,7 +230,7 @@ bool s2n_extension_send_if_tls13_connection(struct s2n_connection *conn)
int s2n_extension_error_if_missing(struct s2n_connection *conn)
{
- S2N_ERROR(S2N_ERR_MISSING_EXTENSION);
+ POSIX_BAIL(S2N_ERR_MISSING_EXTENSION);
}
int s2n_extension_noop_if_missing(struct s2n_connection *conn)
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.h b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.h
index 3c95af639f..f3ccf58730 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.h
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type.h
@@ -22,6 +22,7 @@
#define S2N_EXTENSION_TYPE_FIELD_LENGTH 2
#define S2N_EXTENSION_LENGTH_FIELD_LENGTH 2
+#define S2N_EXTENSION_HEADER_LENGTH (S2N_EXTENSION_TYPE_FIELD_LENGTH + S2N_EXTENSION_LENGTH_FIELD_LENGTH)
/* The number of extensions supported by S2N */
#define S2N_SUPPORTED_EXTENSIONS_COUNT (sizeof(s2n_supported_extensions) / sizeof(s2n_supported_extensions[0]))
@@ -34,6 +35,7 @@ struct s2n_connection;
typedef struct {
uint16_t iana_value;
unsigned is_response:1;
+ uint16_t minimum_version;
int (*send) (struct s2n_connection *conn, struct s2n_stuffer *out);
int (*recv) (struct s2n_connection *conn, struct s2n_stuffer *in);
@@ -63,6 +65,8 @@ static const uint16_t s2n_supported_extensions[] = {
TLS_QUIC_TRANSPORT_PARAMETERS,
TLS_EXTENSION_PSK_KEY_EXCHANGE_MODES,
TLS_EXTENSION_PRE_SHARED_KEY,
+ TLS_EXTENSION_EARLY_DATA,
+ TLS_EXTENSION_EMS,
};
typedef char s2n_extension_bitfield[S2N_SUPPORTED_EXTENSIONS_BITFIELD_LEN];
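
To illustrate the new minimum_version field added above, here is a hypothetical extension definition following the same pattern this patch applies to the cookie, psk_key_exchange_modes, QUIC transport parameters, and server key_share extensions; the IANA value and function names are placeholders, not part of s2n.

    /* Hypothetical TLS 1.3-only extension: with minimum_version set, send, recv,
     * and if_missing all become no-ops on connections below S2N_TLS13. */
    static int s2n_example_send(struct s2n_connection *conn, struct s2n_stuffer *out)
    {
        (void) conn; (void) out;
        return S2N_SUCCESS;
    }

    static int s2n_example_recv(struct s2n_connection *conn, struct s2n_stuffer *in)
    {
        (void) conn; (void) in;
        return S2N_SUCCESS;
    }

    const s2n_extension_type s2n_example_extension = {
        .iana_value = 0xFFFF, /* placeholder; real extensions use IANA-assigned values */
        .minimum_version = S2N_TLS13,
        .is_response = false,
        .send = s2n_example_send,
        .recv = s2n_example_recv,
        .should_send = s2n_extension_always_send,
        .if_missing = s2n_extension_noop_if_missing,
    };
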
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type_lists.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type_lists.c
index 5395366a6c..ba749a4e87 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type_lists.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_extension_type_lists.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "tls/extensions/s2n_extension_type_lists.h"
#include "tls/s2n_connection.h"
@@ -31,6 +31,8 @@
#include "tls/extensions/s2n_client_supported_groups.h"
#include "tls/extensions/s2n_client_pq_kem.h"
#include "tls/extensions/s2n_client_psk.h"
+#include "tls/extensions/s2n_ems.h"
+#include "tls/extensions/s2n_early_data_indication.h"
#include "tls/extensions/s2n_psk_key_exchange_modes.h"
#include "tls/extensions/s2n_client_renegotiation_info.h"
#include "tls/extensions/s2n_ec_point_format.h"
@@ -50,7 +52,13 @@
static const s2n_extension_type *const client_hello_extensions[] = {
&s2n_client_supported_versions_extension,
+
+ /* We MUST process key_share after supported_groups,
+ * because we need to choose the keyshare based on the
+ * mutually supported groups. */
+ &s2n_client_supported_groups_extension,
&s2n_client_key_share_extension,
+
&s2n_client_signature_algorithms_extension,
&s2n_client_server_name_extension,
&s2n_client_alpn_extension,
@@ -58,13 +66,14 @@ static const s2n_extension_type *const client_hello_extensions[] = {
&s2n_client_sct_list_extension,
&s2n_client_max_frag_len_extension,
&s2n_client_session_ticket_extension,
- &s2n_client_supported_groups_extension,
&s2n_client_ec_point_format_extension,
&s2n_client_pq_kem_extension,
&s2n_client_renegotiation_info_extension,
&s2n_client_cookie_extension,
&s2n_quic_transport_parameters_extension,
&s2n_psk_key_exchange_modes_extension,
+ &s2n_client_early_data_indication_extension,
+ &s2n_client_ems_extension,
&s2n_client_psk_extension /* MUST be last */
};
@@ -78,6 +87,24 @@ static const s2n_extension_type *const tls12_server_hello_extensions[] = {
&s2n_server_sct_list_extension,
&s2n_server_max_fragment_length_extension,
&s2n_server_session_ticket_extension,
+ &s2n_server_ems_extension,
+};
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.1.4
+ *# The
+ *# HelloRetryRequest extensions defined in this specification are:
+ *#
+ *# - supported_versions (see Section 4.2.1)
+ *#
+ *# - cookie (see Section 4.2.2)
+ *#
+ *# - key_share (see Section 4.2.8)
+ */
+static const s2n_extension_type *const hello_retry_request_extensions[] = {
+ &s2n_server_supported_versions_extension,
+ &s2n_server_cookie_extension,
+ &s2n_server_key_share_extension,
};
static const s2n_extension_type *const tls13_server_hello_extensions[] = {
@@ -92,6 +119,7 @@ static const s2n_extension_type *const encrypted_extensions[] = {
&s2n_server_max_fragment_length_extension,
&s2n_server_alpn_extension,
&s2n_quic_transport_parameters_extension,
+ &s2n_server_early_data_indication_extension,
};
static const s2n_extension_type *const cert_req_extensions[] = {
@@ -103,22 +131,28 @@ static const s2n_extension_type *const certificate_extensions[] = {
&s2n_server_sct_list_extension,
};
+static const s2n_extension_type *const nst_extensions[] = {
+ &s2n_nst_early_data_indication_extension,
+};
+
#define S2N_EXTENSION_LIST(list) { .extension_types = (list), .count = s2n_array_len(list) }
static s2n_extension_type_list extension_lists[] = {
[S2N_EXTENSION_LIST_CLIENT_HELLO] = S2N_EXTENSION_LIST(client_hello_extensions),
+ [S2N_EXTENSION_LIST_HELLO_RETRY_REQUEST] = S2N_EXTENSION_LIST(hello_retry_request_extensions),
[S2N_EXTENSION_LIST_SERVER_HELLO_DEFAULT] = S2N_EXTENSION_LIST(tls12_server_hello_extensions),
[S2N_EXTENSION_LIST_SERVER_HELLO_TLS13] = S2N_EXTENSION_LIST(tls13_server_hello_extensions),
[S2N_EXTENSION_LIST_ENCRYPTED_EXTENSIONS] = S2N_EXTENSION_LIST(encrypted_extensions),
[S2N_EXTENSION_LIST_CERT_REQ] = S2N_EXTENSION_LIST(cert_req_extensions),
[S2N_EXTENSION_LIST_CERTIFICATE] = S2N_EXTENSION_LIST(certificate_extensions),
+ [S2N_EXTENSION_LIST_NST] = S2N_EXTENSION_LIST(nst_extensions),
[S2N_EXTENSION_LIST_EMPTY] = { .extension_types = NULL, .count = 0 },
};
int s2n_extension_type_list_get(s2n_extension_list_id list_type, s2n_extension_type_list **extension_list)
{
- notnull_check(extension_list);
- lt_check(list_type, s2n_array_len(extension_lists));
+ POSIX_ENSURE_REF(extension_list);
+ POSIX_ENSURE_LT(list_type, s2n_array_len(extension_lists));
*extension_list = &extension_lists[list_type];
return S2N_SUCCESS;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.c
index a91dbfd30b..6b3f144b3a 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.c
@@ -17,17 +17,33 @@
#include "tls/s2n_tls.h"
#include "utils/s2n_safety.h"
-int s2n_ecdhe_parameters_send(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out)
+/* Generate and write an ecc point.
+ * This is used to write the ecc portion of PQ hybrid keyshares, which does NOT include the curve id.
+ */
+S2N_RESULT s2n_ecdhe_send_public_key(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out)
{
- notnull_check(out);
- notnull_check(ecc_evp_params);
- notnull_check(ecc_evp_params->negotiated_curve);
+ RESULT_ENSURE_REF(ecc_evp_params);
+ RESULT_ENSURE_REF(ecc_evp_params->negotiated_curve);
+
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint16(out, ecc_evp_params->negotiated_curve->share_size));
+ if (ecc_evp_params->evp_pkey == NULL) {
+ RESULT_GUARD_POSIX(s2n_ecc_evp_generate_ephemeral_key(ecc_evp_params));
+ }
+ RESULT_GUARD_POSIX(s2n_ecc_evp_write_params_point(ecc_evp_params, out));
+
+ return S2N_RESULT_OK;
+}
- GUARD(s2n_stuffer_write_uint16(out, ecc_evp_params->negotiated_curve->iana_id));
- GUARD(s2n_stuffer_write_uint16(out, ecc_evp_params->negotiated_curve->share_size));
+/* Generate and write an ecc point and its corresponding curve id.
+ * This is used to write ecc keyshares for the client and server key_share extensions.
+ */
+int s2n_ecdhe_parameters_send(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out)
+{
+ POSIX_ENSURE_REF(ecc_evp_params);
+ POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve);
- GUARD(s2n_ecc_evp_generate_ephemeral_key(ecc_evp_params));
- GUARD(s2n_ecc_evp_write_params_point(ecc_evp_params, out));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, ecc_evp_params->negotiated_curve->iana_id));
+ POSIX_GUARD_RESULT(s2n_ecdhe_send_public_key(ecc_evp_params, out));
- return 0;
+ return S2N_SUCCESS;
}
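
A note on the split above: s2n_ecdhe_parameters_send still emits a complete ECC key share (named group id, length, point), while the new s2n_ecdhe_send_public_key emits only the length-prefixed point, because in a PQ hybrid share the group id is written once for the whole hybrid entry. A hypothetical layout, assuming secp256r1 (IANA id 0x0017, 65-byte uncompressed point); not taken from the patch:

    /* s2n_ecdhe_parameters_send -- standalone ECC key share:
     *   00 17 | 00 41 | 04 <32-byte X> <32-byte Y>
     *   group |  len  | uncompressed EC point
     *
     * s2n_ecdhe_send_public_key -- ECC portion of a hybrid key share:
     *           00 41 | 04 <32-byte X> <32-byte Y>
     *            len  | uncompressed EC point (the hybrid group id and total
     *                   share length precede the whole hybrid entry instead)
     */
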
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.h b/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.h
index f621d76b52..daa464c2bd 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.h
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_key_share.h
@@ -25,4 +25,5 @@
#define S2N_SIZE_OF_NAMED_GROUP 2
#define S2N_SIZE_OF_KEY_SHARE_SIZE 2
-extern int s2n_ecdhe_parameters_send(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out);
+S2N_RESULT s2n_ecdhe_send_public_key(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out);
+int s2n_ecdhe_parameters_send(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out);
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_nst_early_data_indication.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_nst_early_data_indication.c
new file mode 100644
index 0000000000..0f149ec2fe
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_nst_early_data_indication.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "api/s2n.h"
+
+#include "tls/extensions/s2n_early_data_indication.h"
+
+#include "stuffer/s2n_stuffer.h"
+#include "tls/s2n_connection.h"
+#include "tls/s2n_early_data.h"
+#include "utils/s2n_safety.h"
+
+static bool s2n_nst_early_data_indication_should_send(struct s2n_connection *conn)
+{
+ uint32_t server_max_early_data = 0;
+ return s2n_result_is_ok(s2n_early_data_get_server_max_size(conn, &server_max_early_data))
+ && server_max_early_data > 0;
+}
+
+/**
+ * The client version of this extension is empty, so we don't read/write any data.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# The "extension_data" field of this extension contains an
+ *# "EarlyDataIndication" value.
+ *#
+ *# struct {} Empty;
+ *#
+ *# struct {
+ *# select (Handshake.msg_type) {
+ *# case new_session_ticket: uint32 max_early_data_size;
+ **
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# };
+ *# } EarlyDataIndication;
+ **/
+
+static int s2n_nst_early_data_indication_send(struct s2n_connection *conn, struct s2n_stuffer *out)
+{
+ uint32_t server_max_early_data = 0;
+ POSIX_GUARD_RESULT(s2n_early_data_get_server_max_size(conn, &server_max_early_data));
+ POSIX_GUARD(s2n_stuffer_write_uint32(out, server_max_early_data));
+ return S2N_SUCCESS;
+}
+
+static int s2n_nst_early_data_indiction_recv(struct s2n_connection *conn, struct s2n_stuffer *in)
+{
+ POSIX_ENSURE_REF(conn);
+ uint32_t server_max_early_data = 0;
+ POSIX_GUARD(s2n_stuffer_read_uint32(in, &server_max_early_data));
+ POSIX_GUARD(s2n_connection_set_server_max_early_data_size(conn, server_max_early_data));
+ return S2N_SUCCESS;
+}
+
+static int s2n_nst_early_data_indication_missing(struct s2n_connection *conn)
+{
+ POSIX_GUARD(s2n_connection_set_server_max_early_data_size(conn, 0));
+ return S2N_SUCCESS;
+}
+
+const s2n_extension_type s2n_nst_early_data_indication_extension = {
+ .iana_value = TLS_EXTENSION_EARLY_DATA,
+ .is_response = false,
+ .send = s2n_nst_early_data_indication_send,
+ .recv = s2n_nst_early_data_indiction_recv,
+ .should_send = s2n_nst_early_data_indication_should_send,
+ .if_missing = s2n_nst_early_data_indication_missing,
+};
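
A brief usage sketch for the extension above: the NewSessionTicket variant is only sent when the server's maximum early data size is non-zero, and it carries that size as a uint32. The sketch assumes the connection-level setter used in this patch is also the server-side configuration knob; the helper name is hypothetical and error handling is minimal.

    #include "api/s2n.h"

    /* Hypothetical helper: create a server connection that will advertise
     * early data support in its NewSessionTicket messages. */
    static struct s2n_connection *make_early_data_server_conn(void)
    {
        struct s2n_connection *conn = s2n_connection_new(S2N_SERVER);
        if (conn == NULL) {
            return NULL;
        }
        /* Any value > 0 makes s2n_nst_early_data_indication_should_send() true,
         * so the ticket's early_data extension will carry 16384. */
        if (s2n_connection_set_server_max_early_data_size(conn, 16384) != S2N_SUCCESS) {
            s2n_connection_free(conn);
            return NULL;
        }
        return conn;
    }
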
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.c
index 67b1904b6b..e66a552772 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.c
@@ -18,16 +18,16 @@
#include "tls/s2n_tls_parameters.h"
#include "tls/extensions/s2n_client_psk.h"
+#include "tls/extensions/s2n_psk_key_exchange_modes.h"
#include "utils/s2n_safety.h"
-#define PSK_KEY_EXCHANGE_MODE_SIZE sizeof(uint8_t)
-
static bool s2n_psk_key_exchange_modes_should_send(struct s2n_connection *conn);
static int s2n_psk_key_exchange_modes_send(struct s2n_connection *conn, struct s2n_stuffer *out);
static int s2n_psk_key_exchange_modes_recv(struct s2n_connection *conn, struct s2n_stuffer *extension);
const s2n_extension_type s2n_psk_key_exchange_modes_extension = {
.iana_value = TLS_EXTENSION_PSK_KEY_EXCHANGE_MODES,
+ .minimum_version = S2N_TLS13,
.is_response = false,
.send = s2n_psk_key_exchange_modes_send,
.recv = s2n_psk_key_exchange_modes_recv,
@@ -43,26 +43,22 @@ static bool s2n_psk_key_exchange_modes_should_send(struct s2n_connection *conn)
static int s2n_psk_key_exchange_modes_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
- GUARD(s2n_stuffer_write_uint8(out, PSK_KEY_EXCHANGE_MODE_SIZE));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, PSK_KEY_EXCHANGE_MODE_SIZE));
/* s2n currently only supports pre-shared keys with (EC)DHE key establishment */
- GUARD(s2n_stuffer_write_uint8(out, TLS_PSK_DHE_KE_MODE));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, TLS_PSK_DHE_KE_MODE));
return S2N_SUCCESS;
}
static int s2n_psk_key_exchange_modes_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
-
- if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
- return S2N_SUCCESS;
- }
+ POSIX_ENSURE_REF(conn);
uint8_t psk_ke_mode_list_len;
- GUARD(s2n_stuffer_read_uint8(extension, &psk_ke_mode_list_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &psk_ke_mode_list_len));
if (psk_ke_mode_list_len > s2n_stuffer_data_available(extension)) {
/* Malformed length, ignore the extension */
return S2N_SUCCESS;
@@ -70,7 +66,7 @@ static int s2n_psk_key_exchange_modes_recv(struct s2n_connection *conn, struct s
for (size_t i = 0; i < psk_ke_mode_list_len; i++) {
uint8_t wire_psk_ke_mode;
- GUARD(s2n_stuffer_read_uint8(extension, &wire_psk_ke_mode));
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &wire_psk_ke_mode));
/* s2n currently only supports pre-shared keys with (EC)DHE key establishment */
if (wire_psk_ke_mode == TLS_PSK_DHE_KE_MODE) {
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.h b/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.h
index 3c64f64489..74c1fe9797 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.h
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_psk_key_exchange_modes.h
@@ -19,4 +19,6 @@
#include "tls/s2n_connection.h"
#include "stuffer/s2n_stuffer.h"
+#define PSK_KEY_EXCHANGE_MODE_SIZE sizeof(uint8_t)
+
extern const s2n_extension_type s2n_psk_key_exchange_modes_extension;
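
As a worked example of the send path in the .c change above (hypothetical bytes, assuming the RFC 8446 value psk_dhe_ke = 1): the extension_data s2n writes is a one-entry mode list.

    /* psk_key_exchange_modes extension_data as written by
     * s2n_psk_key_exchange_modes_send:
     *
     *   01    ke_modes list length (PSK_KEY_EXCHANGE_MODE_SIZE == sizeof(uint8_t))
     *   01    TLS_PSK_DHE_KE_MODE (psk_dhe_ke) -- the only mode s2n offers
     */
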
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_quic_transport_params.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_quic_transport_params.c
index 56f17abc3b..ab84388e7a 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_quic_transport_params.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_quic_transport_params.c
@@ -31,44 +31,45 @@
static bool s2n_quic_transport_params_should_send(struct s2n_connection *conn)
{
- return conn && conn->config && conn->config->quic_enabled;
+ return s2n_connection_is_quic_enabled(conn);
}
static int s2n_quic_transport_params_if_missing(struct s2n_connection *conn)
{
- notnull_check(conn);
- notnull_check(conn->config);
- ENSURE_POSIX(!conn->config->quic_enabled, S2N_ERR_MISSING_EXTENSION);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->config);
+ POSIX_ENSURE(!s2n_connection_is_quic_enabled(conn), S2N_ERR_MISSING_EXTENSION);
return S2N_SUCCESS;
}
static int s2n_quic_transport_params_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
- notnull_check(out);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(out);
if (conn->our_quic_transport_parameters.size) {
- GUARD(s2n_stuffer_write(out, &conn->our_quic_transport_parameters));
+ POSIX_GUARD(s2n_stuffer_write(out, &conn->our_quic_transport_parameters));
}
return S2N_SUCCESS;
}
static int s2n_quic_transport_params_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
- notnull_check(extension);
- notnull_check(conn->config);
- ENSURE_POSIX(conn->config->quic_enabled, S2N_ERR_UNSUPPORTED_EXTENSION);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(extension);
+ POSIX_ENSURE_REF(conn->config);
+ POSIX_ENSURE(s2n_connection_is_quic_enabled(conn), S2N_ERR_UNSUPPORTED_EXTENSION);
if (s2n_stuffer_data_available(extension)) {
- GUARD(s2n_alloc(&conn->peer_quic_transport_parameters, s2n_stuffer_data_available(extension)));
- GUARD(s2n_stuffer_read(extension, &conn->peer_quic_transport_parameters));
+ POSIX_GUARD(s2n_alloc(&conn->peer_quic_transport_parameters, s2n_stuffer_data_available(extension)));
+ POSIX_GUARD(s2n_stuffer_read(extension, &conn->peer_quic_transport_parameters));
}
return S2N_SUCCESS;
}
const s2n_extension_type s2n_quic_transport_parameters_extension = {
.iana_value = TLS_QUIC_TRANSPORT_PARAMETERS,
+ .minimum_version = S2N_TLS13,
.is_response = false,
.send = s2n_quic_transport_params_send,
.recv = s2n_quic_transport_params_recv,
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_alpn.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_alpn.c
index a1f5ac17cf..9e6dee2136 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_alpn.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_alpn.c
@@ -42,39 +42,39 @@ static bool s2n_alpn_should_send(struct s2n_connection *conn)
static int s2n_alpn_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const uint8_t application_protocol_len = strlen(conn->application_protocol);
/* Size of protocol name list */
- GUARD(s2n_stuffer_write_uint16(out, application_protocol_len + sizeof(uint8_t)));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, application_protocol_len + sizeof(uint8_t)));
/* Single entry in protocol name list */
- GUARD(s2n_stuffer_write_uint8(out, application_protocol_len));
- GUARD(s2n_stuffer_write_bytes(out, (uint8_t *) conn->application_protocol, application_protocol_len));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, application_protocol_len));
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, (uint8_t *) conn->application_protocol, application_protocol_len));
return S2N_SUCCESS;
}
static int s2n_alpn_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
uint16_t size_of_all;
- GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &size_of_all));
if (size_of_all > s2n_stuffer_data_available(extension) || size_of_all < 3) {
/* ignore invalid extension size */
return S2N_SUCCESS;
}
uint8_t protocol_len;
- GUARD(s2n_stuffer_read_uint8(extension, &protocol_len));
- lt_check(protocol_len, s2n_array_len(conn->application_protocol));
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &protocol_len));
+ POSIX_ENSURE_LT(protocol_len, s2n_array_len(conn->application_protocol));
uint8_t *protocol = s2n_stuffer_raw_read(extension, protocol_len);
- notnull_check(protocol);
+ POSIX_ENSURE_REF(protocol);
/* copy the first protocol name */
- memcpy_check(conn->application_protocol, protocol, protocol_len);
+ POSIX_CHECKED_MEMCPY(conn->application_protocol, protocol, protocol_len);
conn->application_protocol[protocol_len] = '\0';
return S2N_SUCCESS;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_certificate_status.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_certificate_status.c
index 486835689c..05063011c7 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_certificate_status.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_certificate_status.c
@@ -44,37 +44,53 @@ static bool s2n_tls13_server_status_request_should_send(struct s2n_connection *c
int s2n_server_certificate_status_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
struct s2n_blob *ocsp_status = &conn->handshake_params.our_chain_and_key->ocsp_status;
- notnull_check(ocsp_status);
+ POSIX_ENSURE_REF(ocsp_status);
- GUARD(s2n_stuffer_write_uint8(out, (uint8_t) S2N_STATUS_REQUEST_OCSP));
- GUARD(s2n_stuffer_write_uint24(out, ocsp_status->size));
- GUARD(s2n_stuffer_write(out, ocsp_status));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, (uint8_t) S2N_STATUS_REQUEST_OCSP));
+ POSIX_GUARD(s2n_stuffer_write_uint24(out, ocsp_status->size));
+ POSIX_GUARD(s2n_stuffer_write(out, ocsp_status));
return S2N_SUCCESS;
}
int s2n_server_certificate_status_recv(struct s2n_connection *conn, struct s2n_stuffer *in)
{
- notnull_check(conn);
-
+ POSIX_ENSURE_REF(conn);
+ /**
+ *= https://tools.ietf.org/rfc/rfc6066#section-8
+ *# struct {
+ *# CertificateStatusType status_type;
+ *# select (status_type) {
+ *# case ocsp: OCSPResponse;
+ *# } response;
+ *# } CertificateStatus;
+ *#
+ *# opaque OCSPResponse<1..2^24-1>;
+ *#
+ *# An "ocsp_response" contains a complete, DER-encoded OCSP response
+ *# (using the ASN.1 type OCSPResponse defined in [RFC2560]). Only one
+ *# OCSP response may be sent.
+ **/
uint8_t type;
- GUARD(s2n_stuffer_read_uint8(in, &type));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &type));
if (type != S2N_STATUS_REQUEST_OCSP) {
/* We only support OCSP */
return S2N_SUCCESS;
}
+ conn->status_type = S2N_STATUS_REQUEST_OCSP;
uint32_t status_size;
- GUARD(s2n_stuffer_read_uint24(in, &status_size));
- lte_check(status_size, s2n_stuffer_data_available(in));
+ POSIX_GUARD(s2n_stuffer_read_uint24(in, &status_size));
+ POSIX_ENSURE_LTE(status_size, s2n_stuffer_data_available(in));
- GUARD(s2n_realloc(&conn->status_response, status_size));
- GUARD(s2n_stuffer_read_bytes(in, conn->status_response.data, status_size));
+ POSIX_GUARD(s2n_realloc(&conn->status_response, status_size));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, conn->status_response.data, status_size));
- GUARD(s2n_x509_validator_validate_cert_stapled_ocsp_response(
- &conn->x509_validator, conn, conn->status_response.data, conn->status_response.size));
+ POSIX_ENSURE(s2n_x509_validator_validate_cert_stapled_ocsp_response(
+ &conn->x509_validator, conn, conn->status_response.data, conn->status_response.size) == S2N_CERT_OK,
+ S2N_ERR_CERT_UNTRUSTED);
return S2N_SUCCESS;
}
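
For reference, the body parsed above has this shape on the wire (a hypothetical layout following the RFC 6066 struct quoted in the code, not taken from the patch):

    /* CertificateStatus body read by s2n_server_certificate_status_recv:
     *
     *   01           status_type = ocsp(1); any other value is ignored
     *   xx xx xx     uint24 length of the OCSP response
     *   <DER-encoded OCSPResponse, exactly that many bytes>
     *
     * The new code additionally records status_type on the connection and
     * fails with S2N_ERR_CERT_UNTRUSTED if stapled OCSP validation fails.
     */
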
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_early_data_indication.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_early_data_indication.c
new file mode 100644
index 0000000000..80a8143a81
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_early_data_indication.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "api/s2n.h"
+
+#include "tls/extensions/s2n_early_data_indication.h"
+
+#include "tls/s2n_connection.h"
+#include "tls/s2n_early_data.h"
+#include "tls/s2n_handshake.h"
+#include "utils/s2n_safety.h"
+
+static bool s2n_server_early_data_indication_should_send(struct s2n_connection *conn)
+{
+ return conn && conn->early_data_state == S2N_EARLY_DATA_ACCEPTED;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# A server which receives an "early_data" extension MUST behave in one
+ *# of three ways:
+ *#
+ *# - Ignore the extension and return a regular 1-RTT response.
+ **/
+static int s2n_server_early_data_indication_is_missing(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+ if (conn->early_data_state == S2N_EARLY_DATA_REQUESTED) {
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_REJECTED));
+ }
+ return S2N_SUCCESS;
+}
+
+/**
+ * The server version of this extension is empty, so we don't read/write any data.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# The "extension_data" field of this extension contains an
+ *# "EarlyDataIndication" value.
+ *#
+ *# struct {} Empty;
+ *#
+ *# struct {
+ *# select (Handshake.msg_type) {
+ **
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# case encrypted_extensions: Empty;
+ *# };
+ *# } EarlyDataIndication;
+ **/
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# A server which receives an "early_data" extension MUST behave in one
+ *# of three ways:
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# - Return its own "early_data" extension in EncryptedExtensions,
+ *# indicating that it intends to process the early data.
+ **/
+
+static int s2n_server_early_data_indication_send(struct s2n_connection *conn, struct s2n_stuffer *out)
+{
+ return S2N_SUCCESS;
+}
+
+static int s2n_server_early_data_indication_recv(struct s2n_connection *conn, struct s2n_stuffer *in)
+{
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# If any of these checks fail, the server MUST NOT respond with the
+ *# extension
+ **/
+ POSIX_ENSURE(s2n_early_data_is_valid_for_connection(conn), S2N_ERR_EARLY_DATA_NOT_ALLOWED);
+
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_ACCEPTED));
+
+ /* The client does not know for sure whether the server accepted early data until it receives
+ * this extension as part of the EncryptedExtensions message, after the handshake type has
+ * already been calculated. We'll need to manually update the handshake type.
+ */
+ conn->handshake.handshake_type |= WITH_EARLY_DATA;
+
+ return S2N_SUCCESS;
+}
+
+const s2n_extension_type s2n_server_early_data_indication_extension = {
+ .iana_value = TLS_EXTENSION_EARLY_DATA,
+ .is_response = true,
+ .send = s2n_server_early_data_indication_send,
+ .recv = s2n_server_early_data_indication_recv,
+ .should_send = s2n_server_early_data_indication_should_send,
+ .if_missing = s2n_server_early_data_indication_is_missing,
+};
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_ems.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_ems.c
new file mode 100644
index 0000000000..06f0474d8d
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_ems.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include <sys/param.h>
+#include <stdint.h>
+
+#include "tls/s2n_tls.h"
+#include "tls/extensions/s2n_ems.h"
+
+#include "utils/s2n_safety.h"
+
+static int s2n_server_ems_recv(struct s2n_connection *conn, struct s2n_stuffer *extension);
+static bool s2n_server_ems_should_send(struct s2n_connection *conn);
+static int s2n_server_ems_if_missing(struct s2n_connection *conn);
+
+/**
+ *= https://tools.ietf.org/rfc/rfc7627#section-5.1
+ *#
+ *# This document defines a new TLS extension, "extended_master_secret"
+ *# (with extension type 0x0017), which is used to signal both client and
+ *# server to use the extended master secret computation. The
+ *# "extension_data" field of this extension is empty. Thus, the entire
+ *# encoding of the extension is 00 17 00 00 (in hexadecimal.)
+ **/
+const s2n_extension_type s2n_server_ems_extension = {
+ .iana_value = TLS_EXTENSION_EMS,
+ .is_response = true,
+ .send = s2n_extension_send_noop,
+ .recv = s2n_server_ems_recv,
+ .should_send = s2n_server_ems_should_send,
+ .if_missing = s2n_server_ems_if_missing,
+};
+
+static int s2n_server_ems_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
+{
+ POSIX_ENSURE_REF(conn);
+
+ /* Read nothing. The extension just needs to exist. */
+ conn->ems_negotiated = true;
+
+ return S2N_SUCCESS;
+}
+
+static bool s2n_server_ems_should_send(struct s2n_connection *conn)
+{
+ return conn && conn->actual_protocol_version < S2N_TLS13;
+}
+
+static int s2n_server_ems_if_missing(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc7627#section-5.3
+ *# If the original session used the extension but the new ServerHello
+ *# does not contain the extension, the client MUST abort the
+ *# handshake.
+ **/
+ POSIX_ENSURE(!conn->ems_negotiated, S2N_ERR_MISSING_EXTENSION);
+
+ return S2N_SUCCESS;
+}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_key_share.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_key_share.c
index eb0ead255a..46f5deb1d5 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_key_share.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_key_share.c
@@ -27,115 +27,105 @@ static int s2n_server_key_share_recv(struct s2n_connection *conn, struct s2n_stu
const s2n_extension_type s2n_server_key_share_extension = {
.iana_value = TLS_EXTENSION_KEY_SHARE,
+ .minimum_version = S2N_TLS13,
.is_response = false,
.send = s2n_server_key_share_send,
.recv = s2n_server_key_share_recv,
- .should_send = s2n_extension_send_if_tls13_connection,
+ .should_send = s2n_extension_always_send,
.if_missing = s2n_extension_noop_if_missing,
};
static int s2n_server_key_share_generate_pq_hybrid(struct s2n_connection *conn, struct s2n_stuffer *out) {
- notnull_check(out);
- notnull_check(conn);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(conn);
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
- struct s2n_kem_group_params *server_kem_group_params = &conn->secure.server_kem_group_params;
+ struct s2n_kem_group_params *server_kem_group_params = &conn->kex_params.server_kem_group_params;
- notnull_check(server_kem_group_params->kem_group);
- GUARD(s2n_stuffer_write_uint16(out, server_kem_group_params->kem_group->iana_id));
+ POSIX_ENSURE_REF(server_kem_group_params->kem_group);
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, server_kem_group_params->kem_group->iana_id));
struct s2n_stuffer_reservation total_share_size = { 0 };
- GUARD(s2n_stuffer_reserve_uint16(out, &total_share_size));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &total_share_size));
struct s2n_ecc_evp_params *server_ecc_params = &server_kem_group_params->ecc_params;
- notnull_check(server_ecc_params->negotiated_curve);
- GUARD(s2n_stuffer_write_uint16(out, server_ecc_params->negotiated_curve->share_size));
- GUARD(s2n_ecc_evp_generate_ephemeral_key(server_ecc_params));
- GUARD(s2n_ecc_evp_write_params_point(server_ecc_params, out));
-
- notnull_check(conn->secure.chosen_client_kem_group_params);
- struct s2n_kem_params *client_kem_params = &conn->secure.chosen_client_kem_group_params->kem_params;
- notnull_check(client_kem_params->public_key.data);
+ POSIX_ENSURE_REF(server_ecc_params->negotiated_curve);
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, server_ecc_params->negotiated_curve->share_size));
+ POSIX_GUARD(s2n_ecc_evp_generate_ephemeral_key(server_ecc_params));
+ POSIX_GUARD(s2n_ecc_evp_write_params_point(server_ecc_params, out));
+
+ struct s2n_kem_params *client_kem_params = &conn->kex_params.client_kem_group_params.kem_params;
+ POSIX_ENSURE_REF(client_kem_params->public_key.data);
/* s2n_kem_send_ciphertext() will generate the PQ shared secret and use
* the client's public key to encapsulate; the PQ shared secret will be
* stored in client_kem_params, and will be used during the hybrid shared
* secret derivation. */
- GUARD(s2n_kem_send_ciphertext(out, client_kem_params));
+ POSIX_GUARD(s2n_kem_send_ciphertext(out, client_kem_params));
- GUARD(s2n_stuffer_write_vector_size(&total_share_size));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&total_share_size));
return S2N_SUCCESS;
}
/* Check that client has sent a corresponding key share for the server's KEM group */
int s2n_server_key_share_send_check_pq_hybrid(struct s2n_connection *conn) {
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
- notnull_check(conn->secure.server_kem_group_params.kem_group);
- notnull_check(conn->secure.server_kem_group_params.kem_params.kem);
- notnull_check(conn->secure.server_kem_group_params.ecc_params.negotiated_curve);
+ POSIX_ENSURE_REF(conn->kex_params.server_kem_group_params.kem_group);
+ POSIX_ENSURE_REF(conn->kex_params.server_kem_group_params.kem_params.kem);
+ POSIX_ENSURE_REF(conn->kex_params.server_kem_group_params.ecc_params.negotiated_curve);
- const struct s2n_kem_group *server_kem_group = conn->secure.server_kem_group_params.kem_group;
+ const struct s2n_kem_group *server_kem_group = conn->kex_params.server_kem_group_params.kem_group;
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
- ENSURE_POSIX(s2n_kem_preferences_includes_tls13_kem_group(kem_pref, server_kem_group->iana_id),
+ POSIX_ENSURE(s2n_kem_preferences_includes_tls13_kem_group(kem_pref, server_kem_group->iana_id),
S2N_ERR_KEM_UNSUPPORTED_PARAMS);
- struct s2n_kem_group_params *client_params = conn->secure.chosen_client_kem_group_params;
- notnull_check(client_params);
-
- ENSURE_POSIX(client_params->kem_group == server_kem_group, S2N_ERR_BAD_KEY_SHARE);
+ struct s2n_kem_group_params *client_params = &conn->kex_params.client_kem_group_params;
+ POSIX_ENSURE(client_params->kem_group == server_kem_group, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(client_params->ecc_params.negotiated_curve == server_kem_group->curve, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(client_params->ecc_params.evp_pkey != NULL, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_params->ecc_params.negotiated_curve == server_kem_group->curve, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_params->ecc_params.evp_pkey != NULL, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(client_params->kem_params.kem == server_kem_group->kem, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(client_params->kem_params.public_key.size == server_kem_group->kem->public_key_length, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(client_params->kem_params.public_key.data != NULL, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_params->kem_params.kem == server_kem_group->kem, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_params->kem_params.public_key.size == server_kem_group->kem->public_key_length, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_params->kem_params.public_key.data != NULL, S2N_ERR_BAD_KEY_SHARE);
return S2N_SUCCESS;
}
/* Check that client has sent a corresponding key share for the server's EC curve */
int s2n_server_key_share_send_check_ecdhe(struct s2n_connection *conn) {
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
- const struct s2n_ecc_named_curve *server_curve = conn->secure.server_ecc_evp_params.negotiated_curve;
- notnull_check(server_curve);
+ const struct s2n_ecc_named_curve *server_curve = conn->kex_params.server_ecc_evp_params.negotiated_curve;
+ POSIX_ENSURE_REF(server_curve);
- struct s2n_ecc_evp_params *client_params = NULL;
- for (size_t i = 0; i < ecc_pref->count; i++) {
- if (server_curve == ecc_pref->ecc_curves[i]) {
- client_params = &conn->secure.client_ecc_evp_params[i];
- break;
- }
- }
-
- notnull_check(client_params);
- ENSURE_POSIX(client_params->negotiated_curve == server_curve, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(client_params->evp_pkey != NULL, S2N_ERR_BAD_KEY_SHARE);
+ struct s2n_ecc_evp_params *client_params = &conn->kex_params.client_ecc_evp_params;
+ POSIX_ENSURE(client_params->negotiated_curve == server_curve, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_params->evp_pkey != NULL, S2N_ERR_BAD_KEY_SHARE);
return S2N_SUCCESS;
}
static int s2n_server_key_share_send(struct s2n_connection *conn, struct s2n_stuffer *out) {
- notnull_check(conn);
- notnull_check(out);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(out);
- const struct s2n_ecc_named_curve *curve = conn->secure.server_ecc_evp_params.negotiated_curve;
- const struct s2n_kem_group *kem_group = conn->secure.server_kem_group_params.kem_group;
+ const struct s2n_ecc_named_curve *curve = conn->kex_params.server_ecc_evp_params.negotiated_curve;
+ const struct s2n_kem_group *kem_group = conn->kex_params.server_kem_group_params.kem_group;
/* Boolean XOR: exactly one of {server_curve, server_kem_group} should be non-null. */
- ENSURE_POSIX((curve == NULL) != (kem_group == NULL), S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ POSIX_ENSURE((curve == NULL) != (kem_group == NULL), S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
/* Retry requests only require the selected named group, not an actual share.
* https://tools.ietf.org/html/rfc8446#section-4.2.8 */
@@ -147,16 +137,16 @@ static int s2n_server_key_share_send(struct s2n_connection *conn, struct s2n_stu
named_group_id = kem_group->iana_id;
}
- GUARD(s2n_stuffer_write_uint16(out, named_group_id));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, named_group_id));
return S2N_SUCCESS;
}
if (curve != NULL) {
- GUARD(s2n_server_key_share_send_check_ecdhe(conn));
- GUARD(s2n_ecdhe_parameters_send(&conn->secure.server_ecc_evp_params, out));
+ POSIX_GUARD(s2n_server_key_share_send_check_ecdhe(conn));
+ POSIX_GUARD(s2n_ecdhe_parameters_send(&conn->kex_params.server_ecc_evp_params, out));
} else {
- GUARD(s2n_server_key_share_send_check_pq_hybrid(conn));
- GUARD(s2n_server_key_share_generate_pq_hybrid(conn, out));
+ POSIX_GUARD(s2n_server_key_share_send_check_pq_hybrid(conn));
+ POSIX_GUARD(s2n_server_key_share_generate_pq_hybrid(conn, out));
}
return S2N_SUCCESS;
@@ -164,20 +154,20 @@ static int s2n_server_key_share_send(struct s2n_connection *conn, struct s2n_stu
static int s2n_server_key_share_recv_pq_hybrid(struct s2n_connection *conn, uint16_t named_group_iana,
struct s2n_stuffer *extension) {
- notnull_check(conn);
- notnull_check(extension);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(extension);
/* If PQ is disabled, the client should not have sent any PQ IDs
* in the supported_groups list of the initial ClientHello */
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
/* This check should have been done higher up, but including it here as well for extra defense.
* Uses S2N_ERR_ECDHE_UNSUPPORTED_CURVE for backward compatibility. */
- ENSURE_POSIX(s2n_kem_preferences_includes_tls13_kem_group(kem_pref, named_group_iana), S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ POSIX_ENSURE(s2n_kem_preferences_includes_tls13_kem_group(kem_pref, named_group_iana), S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
size_t kem_group_index = 0;
for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
@@ -187,7 +177,7 @@ static int s2n_server_key_share_recv_pq_hybrid(struct s2n_connection *conn, uint
}
}
- struct s2n_kem_group_params *server_kem_group_params = &conn->secure.server_kem_group_params;
+ struct s2n_kem_group_params *server_kem_group_params = &conn->kex_params.server_kem_group_params;
server_kem_group_params->kem_group = kem_pref->tls13_kem_groups[kem_group_index];
server_kem_group_params->kem_params.kem = kem_pref->tls13_kem_groups[kem_group_index]->kem;
server_kem_group_params->ecc_params.negotiated_curve = kem_pref->tls13_kem_groups[kem_group_index]->curve;
@@ -200,29 +190,26 @@ static int s2n_server_key_share_recv_pq_hybrid(struct s2n_connection *conn, uint
}
/* Ensure that the server's key share corresponds with a key share previously sent by the client */
- ENSURE_POSIX(conn->secure.client_kem_group_params[kem_group_index].kem_params.private_key.data != NULL,
- S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(conn->secure.client_kem_group_params[kem_group_index].ecc_params.evp_pkey != NULL,
- S2N_ERR_BAD_KEY_SHARE);
- notnull_check(conn->secure.client_kem_group_params[kem_group_index].kem_group);
- eq_check(conn->secure.client_kem_group_params[kem_group_index].kem_group->iana_id, named_group_iana);
- conn->secure.chosen_client_kem_group_params = &conn->secure.client_kem_group_params[kem_group_index];
+ struct s2n_kem_group_params *client_kem_group_params = &conn->kex_params.client_kem_group_params;
+ POSIX_ENSURE(client_kem_group_params->kem_params.private_key.data, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_kem_group_params->ecc_params.evp_pkey, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_kem_group_params->kem_group == server_kem_group_params->kem_group, S2N_ERR_BAD_KEY_SHARE);
uint16_t received_total_share_size;
- GUARD(s2n_stuffer_read_uint16(extension, &received_total_share_size));
- ENSURE_POSIX(received_total_share_size == server_kem_group_params->kem_group->server_share_size, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(s2n_stuffer_data_available(extension) == received_total_share_size, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &received_total_share_size));
+ POSIX_ENSURE(received_total_share_size == server_kem_group_params->kem_group->server_share_size, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(s2n_stuffer_data_available(extension) == received_total_share_size, S2N_ERR_BAD_KEY_SHARE);
/* Parse ECC key share */
uint16_t ecc_share_size;
struct s2n_blob point_blob;
- GUARD(s2n_stuffer_read_uint16(extension, &ecc_share_size));
- ENSURE_POSIX(s2n_ecc_evp_read_params_point(extension, ecc_share_size, &point_blob) == S2N_SUCCESS, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(s2n_ecc_evp_parse_params_point(&point_blob, &server_kem_group_params->ecc_params) == S2N_SUCCESS, S2N_ERR_BAD_KEY_SHARE);
- ENSURE_POSIX(server_kem_group_params->ecc_params.evp_pkey != NULL, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &ecc_share_size));
+ POSIX_ENSURE(s2n_ecc_evp_read_params_point(extension, ecc_share_size, &point_blob) == S2N_SUCCESS, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(s2n_ecc_evp_parse_params_point(&point_blob, &server_kem_group_params->ecc_params) == S2N_SUCCESS, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(server_kem_group_params->ecc_params.evp_pkey != NULL, S2N_ERR_BAD_KEY_SHARE);
/* Parse the PQ KEM key share */
- ENSURE_POSIX(s2n_kem_recv_ciphertext(extension, &conn->secure.chosen_client_kem_group_params->kem_params) == S2N_SUCCESS,
+ POSIX_ENSURE(s2n_kem_recv_ciphertext(extension, &conn->kex_params.client_kem_group_params.kem_params) == S2N_SUCCESS,
S2N_ERR_BAD_KEY_SHARE);
return S2N_SUCCESS;
@@ -230,15 +217,15 @@ static int s2n_server_key_share_recv_pq_hybrid(struct s2n_connection *conn, uint
static int s2n_server_key_share_recv_ecc(struct s2n_connection *conn, uint16_t named_group_iana,
struct s2n_stuffer *extension) {
- notnull_check(conn);
- notnull_check(extension);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(extension);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
/* This check should have been done higher up, but including it here as well for extra defense. */
- ENSURE_POSIX(s2n_ecc_preferences_includes_curve(ecc_pref, named_group_iana),
+ POSIX_ENSURE(s2n_ecc_preferences_includes_curve(ecc_pref, named_group_iana),
S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
size_t supported_curve_index = 0;
@@ -250,7 +237,7 @@ static int s2n_server_key_share_recv_ecc(struct s2n_connection *conn, uint16_t n
}
}
- struct s2n_ecc_evp_params *server_ecc_evp_params = &conn->secure.server_ecc_evp_params;
+ struct s2n_ecc_evp_params *server_ecc_evp_params = &conn->kex_params.server_ecc_evp_params;
server_ecc_evp_params->negotiated_curve = ecc_pref->ecc_curves[supported_curve_index];
/* If this is a HelloRetryRequest, we won't have a key share. We just have the selected group.
@@ -259,12 +246,14 @@ static int s2n_server_key_share_recv_ecc(struct s2n_connection *conn, uint16_t n
return S2N_SUCCESS;
}
- /* Key share not sent by client */
- S2N_ERROR_IF(conn->secure.client_ecc_evp_params[supported_curve_index].evp_pkey == NULL, S2N_ERR_BAD_KEY_SHARE);
+ /* Verify key share sent by client */
+ struct s2n_ecc_evp_params *client_ecc_evp_params = &conn->kex_params.client_ecc_evp_params;
+ POSIX_ENSURE(client_ecc_evp_params->negotiated_curve == server_ecc_evp_params->negotiated_curve, S2N_ERR_BAD_KEY_SHARE);
+ POSIX_ENSURE(client_ecc_evp_params->evp_pkey, S2N_ERR_BAD_KEY_SHARE);
uint16_t share_size;
S2N_ERROR_IF(s2n_stuffer_data_available(extension) < sizeof(share_size), S2N_ERR_BAD_KEY_SHARE);
- GUARD(s2n_stuffer_read_uint16(extension, &share_size));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &share_size));
S2N_ERROR_IF(s2n_stuffer_data_available(extension) < share_size, S2N_ERR_BAD_KEY_SHARE);
/* Proceed to parse share */
@@ -286,31 +275,27 @@ static int s2n_server_key_share_recv_ecc(struct s2n_connection *conn, uint16_t n
*/
static int s2n_server_key_share_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
- return S2N_SUCCESS;
- }
-
- notnull_check(conn);
- notnull_check(extension);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(extension);
uint16_t negotiated_named_group_iana = 0;
S2N_ERROR_IF(s2n_stuffer_data_available(extension) < sizeof(negotiated_named_group_iana), S2N_ERR_BAD_KEY_SHARE);
- GUARD(s2n_stuffer_read_uint16(extension, &negotiated_named_group_iana));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &negotiated_named_group_iana));
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
if (s2n_ecc_preferences_includes_curve(ecc_pref, negotiated_named_group_iana)) {
- GUARD(s2n_server_key_share_recv_ecc(conn, negotiated_named_group_iana, extension));
+ POSIX_GUARD(s2n_server_key_share_recv_ecc(conn, negotiated_named_group_iana, extension));
} else if (s2n_kem_preferences_includes_tls13_kem_group(kem_pref, negotiated_named_group_iana)) {
- GUARD(s2n_server_key_share_recv_pq_hybrid(conn, negotiated_named_group_iana, extension));
+ POSIX_GUARD(s2n_server_key_share_recv_pq_hybrid(conn, negotiated_named_group_iana, extension));
} else {
- S2N_ERROR(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ POSIX_BAIL(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
}
return S2N_SUCCESS;
@@ -318,15 +303,15 @@ static int s2n_server_key_share_recv(struct s2n_connection *conn, struct s2n_stu
/* Selects highest priority mutually supported key share, or indicates need for HRR */
int s2n_extensions_server_key_share_select(struct s2n_connection *conn) {
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
/* Boolean XOR check. When receiving the supported_groups extension, s2n server
* should (exclusively) set either server_curve or server_kem_group based on the
@@ -335,44 +320,38 @@ int s2n_extensions_server_key_share_select(struct s2n_connection *conn) {
* groups; key negotiation is not possible and the handshake should be aborted
* without sending HRR. (The case of both being non-NULL should never occur, and
* is an error.) */
- const struct s2n_ecc_named_curve *server_curve = conn->secure.server_ecc_evp_params.negotiated_curve;
- const struct s2n_kem_group *server_kem_group = conn->secure.server_kem_group_params.kem_group;
- ENSURE_POSIX((server_curve == NULL) != (server_kem_group == NULL), S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ const struct s2n_ecc_named_curve *server_curve = conn->kex_params.server_ecc_evp_params.negotiated_curve;
+ const struct s2n_kem_group *server_kem_group = conn->kex_params.server_kem_group_params.kem_group;
+ POSIX_ENSURE((server_curve == NULL) != (server_kem_group == NULL), S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
/* To avoid extra round trips, we prefer to negotiate a group for which we have already
* received a key share (even if it is different than the group previously chosen). In
* general, we prefer to negotiate PQ over ECDHE; however, if both client and server
 * support PQ, but the client sent only EC key shares, then we will negotiate ECDHE. */
- for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
- if (conn->secure.mutually_supported_kem_groups[i] && conn->secure.client_kem_group_params[i].kem_group) {
- notnull_check(conn->secure.client_kem_group_params[i].ecc_params.negotiated_curve);
- notnull_check(conn->secure.client_kem_group_params[i].kem_params.kem);
+ if (conn->kex_params.client_kem_group_params.kem_group) {
+ POSIX_ENSURE_REF(conn->kex_params.client_kem_group_params.ecc_params.negotiated_curve);
+ POSIX_ENSURE_REF(conn->kex_params.client_kem_group_params.kem_params.kem);
- conn->secure.server_kem_group_params.kem_group = conn->secure.client_kem_group_params[i].kem_group;
- conn->secure.server_kem_group_params.ecc_params.negotiated_curve = conn->secure.client_kem_group_params[i].ecc_params.negotiated_curve;
- conn->secure.server_kem_group_params.kem_params.kem = conn->secure.client_kem_group_params[i].kem_params.kem;
- conn->secure.chosen_client_kem_group_params = &conn->secure.client_kem_group_params[i];
+ conn->kex_params.server_kem_group_params.kem_group = conn->kex_params.client_kem_group_params.kem_group;
+ conn->kex_params.server_kem_group_params.ecc_params.negotiated_curve = conn->kex_params.client_kem_group_params.ecc_params.negotiated_curve;
+ conn->kex_params.server_kem_group_params.kem_params.kem = conn->kex_params.client_kem_group_params.kem_params.kem;
- conn->secure.server_ecc_evp_params.negotiated_curve = NULL;
- return S2N_SUCCESS;
- }
+ conn->kex_params.server_ecc_evp_params.negotiated_curve = NULL;
+ return S2N_SUCCESS;
}
- for (size_t i = 0; i < ecc_pref->count; i++) {
- if (conn->secure.mutually_supported_curves[i] && conn->secure.client_ecc_evp_params[i].negotiated_curve) {
- conn->secure.server_ecc_evp_params.negotiated_curve = conn->secure.client_ecc_evp_params[i].negotiated_curve;
-
- conn->secure.server_kem_group_params.kem_group = NULL;
- conn->secure.server_kem_group_params.ecc_params.negotiated_curve = NULL;
- conn->secure.server_kem_group_params.kem_params.kem = NULL;
- conn->secure.chosen_client_kem_group_params = NULL;
- return S2N_SUCCESS;
- }
+ if (conn->kex_params.client_ecc_evp_params.negotiated_curve) {
+ conn->kex_params.server_ecc_evp_params.negotiated_curve = conn->kex_params.client_ecc_evp_params.negotiated_curve;
+
+ conn->kex_params.server_kem_group_params.kem_group = NULL;
+ conn->kex_params.server_kem_group_params.ecc_params.negotiated_curve = NULL;
+ conn->kex_params.server_kem_group_params.kem_params.kem = NULL;
+ return S2N_SUCCESS;
}
/* Server and client have mutually supported groups, but the client did not send key
* shares for any of them. Send HRR indicating the server's preference. */
- GUARD(s2n_set_hello_retry_required(conn));
+ POSIX_GUARD(s2n_set_hello_retry_required(conn));
return S2N_SUCCESS;
}
@@ -389,7 +368,7 @@ int s2n_extensions_server_key_share_select(struct s2n_connection *conn) {
*/
int s2n_extensions_server_key_share_send_size(struct s2n_connection *conn)
{
- const struct s2n_ecc_named_curve* curve = conn->secure.server_ecc_evp_params.negotiated_curve;
+ const struct s2n_ecc_named_curve* curve = conn->kex_params.server_ecc_evp_params.negotiated_curve;
int key_share_size = S2N_SIZE_OF_EXTENSION_TYPE
+ S2N_SIZE_OF_EXTENSION_DATA_SIZE
+ S2N_SIZE_OF_NAMED_GROUP;
@@ -422,7 +401,7 @@ int s2n_extensions_server_key_share_send(struct s2n_connection *conn, struct s2n
/*
* Client receives a Server Hello key share.
*
- * If the curve is supported, conn->secure.server_ecc_evp_params will be set.
+ * If the curve is supported, conn->kex_params.server_ecc_evp_params will be set.
*/
int s2n_extensions_server_key_share_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
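
Condensed, the selection order that s2n_extensions_server_key_share_select() implements above is: prefer a group the client already sent a key share for, prefer the PQ hybrid share over plain ECDHE when both are present, and fall back to a HelloRetryRequest only when no usable share arrived. A hedged sketch of that priority, with a hypothetical helper name and the field names from this diff (error handling omitted):

    static int s2n_key_share_selection_sketch(struct s2n_connection *conn)
    {
        /* 1. A PQ hybrid key share from the client wins outright. */
        if (conn->kex_params.client_kem_group_params.kem_group != NULL) {
            conn->kex_params.server_kem_group_params.kem_group =
                    conn->kex_params.client_kem_group_params.kem_group;
            return S2N_SUCCESS;
        }
        /* 2. Otherwise use the client's ECDHE key share, if it sent one. */
        if (conn->kex_params.client_ecc_evp_params.negotiated_curve != NULL) {
            conn->kex_params.server_ecc_evp_params.negotiated_curve =
                    conn->kex_params.client_ecc_evp_params.negotiated_curve;
            return S2N_SUCCESS;
        }
        /* 3. Mutually supported groups but no usable share: request a retry. */
        return s2n_set_hello_retry_required(conn);
    }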
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_max_fragment_length.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_max_fragment_length.c
index 69b1530e54..e55e3f21e5 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_max_fragment_length.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_max_fragment_length.c
@@ -13,12 +13,15 @@
* permissions and limitations under the License.
*/
+#include <sys/param.h>
+
#include "error/s2n_errno.h"
#include "stuffer/s2n_stuffer.h"
#include "utils/s2n_safety.h"
+#include "tls/s2n_tls.h"
#include "tls/s2n_tls_parameters.h"
#include "tls/s2n_connection.h"
@@ -39,23 +42,42 @@ const s2n_extension_type s2n_server_max_fragment_length_extension = {
static bool s2n_max_fragment_length_should_send(struct s2n_connection *conn)
{
- return conn && conn->mfl_code != S2N_TLS_MAX_FRAG_LEN_EXT_NONE;
+ return conn && conn->negotiated_mfl_code != S2N_TLS_MAX_FRAG_LEN_EXT_NONE;
}
static int s2n_max_fragment_length_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
- GUARD(s2n_stuffer_write_uint8(out, conn->mfl_code));
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, conn->negotiated_mfl_code));
return S2N_SUCCESS;
}
static int s2n_max_fragment_length_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
- notnull_check(conn->config);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->config);
uint8_t mfl_code;
- GUARD(s2n_stuffer_read_uint8(extension, &mfl_code));
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &mfl_code));
+
+ /*
+ *= https://tools.ietf.org/rfc/rfc6066#section-4
+ *# Similarly, if a client
+ *# receives a maximum fragment length negotiation response that differs
+ *# from the length it requested, it MUST also abort the handshake with
+ *# an "illegal_parameter" alert.
+ */
S2N_ERROR_IF(mfl_code != conn->config->mfl_code, S2N_ERR_MAX_FRAG_LEN_MISMATCH);
+
+ /*
+ *= https://tools.ietf.org/rfc/rfc6066#section-4
+ *# Once a maximum fragment length other than 2^14 has been successfully
+ *# negotiated, the client and server MUST immediately begin fragmenting
+ *# messages (including handshake messages) to ensure that no fragment
+ *# larger than the negotiated length is sent.
+ */
+ conn->negotiated_mfl_code = mfl_code;
+ POSIX_GUARD_RESULT(s2n_connection_set_max_fragment_length(conn, conn->max_outgoing_fragment_length));
+
return S2N_SUCCESS;
}
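
The negotiated_mfl_code stored above is the RFC 6066 MaxFragmentLength enum value, which maps to a fragment length of 2^(8 + code) bytes. A minimal sketch of that mapping with a hypothetical helper name (s2n's own conversion sits behind s2n_connection_set_max_fragment_length()):

    /* RFC 6066: 1 -> 2^9 (512), 2 -> 2^10 (1024), 3 -> 2^11 (2048), 4 -> 2^12 (4096) */
    static int s2n_mfl_code_to_length_sketch(uint8_t mfl_code, uint16_t *length)
    {
        POSIX_ENSURE_REF(length);
        POSIX_ENSURE(mfl_code >= 1 && mfl_code <= 4, S2N_ERR_INVALID_MAX_FRAG_LEN);
        *length = 1 << (8 + mfl_code);
        return S2N_SUCCESS;
    }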
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_psk.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_psk.c
index cb930b17a4..9a36caff9f 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_psk.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_psk.c
@@ -28,6 +28,7 @@ static int s2n_server_psk_recv(struct s2n_connection *conn, struct s2n_stuffer *
const s2n_extension_type s2n_server_psk_extension = {
.iana_value = TLS_EXTENSION_PRE_SHARED_KEY,
+ .minimum_version = S2N_TLS13,
.is_response = true,
.send = s2n_server_psk_send,
.recv = s2n_server_psk_recv,
@@ -38,35 +39,30 @@ const s2n_extension_type s2n_server_psk_extension = {
static bool s2n_server_psk_should_send(struct s2n_connection *conn)
{
/* Only send a server pre_shared_key extension if a chosen PSK is set on the connection */
- return conn && s2n_connection_get_protocol_version(conn) >= S2N_TLS13
- && conn->psk_params.chosen_psk;
+ return conn && conn->psk_params.chosen_psk;
}
static int s2n_server_psk_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
/* Send the index of the chosen PSK that is stored on the connection. */
- GUARD(s2n_stuffer_write_uint16(out, conn->psk_params.chosen_psk_wire_index));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, conn->psk_params.chosen_psk_wire_index));
return S2N_SUCCESS;
}
static int s2n_server_psk_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
-
- if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
- return S2N_SUCCESS;
- }
+ POSIX_ENSURE_REF(conn);
/* Currently in s2n, only (EC)DHE key exchange mode is supported.
* Any other mode selected by the server is invalid because it was not offered by the client.
* A key_share extension MUST have been received in order to use a pre-shared key in (EC)DHE key exchange mode.
*/
s2n_extension_type_id key_share_ext_id;
- GUARD(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_KEY_SHARE, &key_share_ext_id));
- ENSURE_POSIX(S2N_CBIT_TEST(conn->extension_requests_received, key_share_ext_id), S2N_ERR_MISSING_EXTENSION);
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_KEY_SHARE, &key_share_ext_id));
+ POSIX_ENSURE(S2N_CBIT_TEST(conn->extension_requests_received, key_share_ext_id), S2N_ERR_MISSING_EXTENSION);
/* From RFC section: https://tools.ietf.org/html/rfc8446#section-4.2.8.1
* Any future values that are allocated must ensure that the transmitted protocol messages
@@ -76,16 +72,16 @@ static int s2n_server_psk_recv(struct s2n_connection *conn, struct s2n_stuffer *
conn->psk_params.psk_ke_mode = S2N_PSK_DHE_KE;
uint16_t chosen_psk_wire_index = 0;
- GUARD(s2n_stuffer_read_uint16(extension, &chosen_psk_wire_index));
+ POSIX_GUARD(s2n_stuffer_read_uint16(extension, &chosen_psk_wire_index));
/* From RFC section: https://tools.ietf.org/html/rfc8446#section-4.2.11
* Clients MUST verify that the server's selected_identity is within the
* range supplied by the client.
*/
- ENSURE_POSIX(chosen_psk_wire_index < conn->psk_params.psk_list.len, S2N_ERR_INVALID_ARGUMENT);
+ POSIX_ENSURE(chosen_psk_wire_index < conn->psk_params.psk_list.len, S2N_ERR_INVALID_ARGUMENT);
conn->psk_params.chosen_psk_wire_index = chosen_psk_wire_index;
- GUARD_AS_POSIX(s2n_array_get(&conn->psk_params.psk_list, conn->psk_params.chosen_psk_wire_index,
+ POSIX_GUARD_RESULT(s2n_array_get(&conn->psk_params.psk_list, conn->psk_params.chosen_psk_wire_index,
(void **)&conn->psk_params.chosen_psk));
return S2N_SUCCESS;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_renegotiation_info.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_renegotiation_info.c
index aac791c43c..6da5481ec9 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_renegotiation_info.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_renegotiation_info.c
@@ -45,7 +45,7 @@ static bool s2n_renegotiation_info_should_send(struct s2n_connection *conn)
static int s2n_renegotiation_info_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
/* renegotiated_connection length. Zero since we don't support renegotiation. */
- GUARD(s2n_stuffer_write_uint8(out, 0));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, 0));
return S2N_SUCCESS;
}
@@ -55,11 +55,11 @@ static int s2n_renegotiation_info_recv(struct s2n_connection *conn, struct s2n_s
* the "renegotiated_connection" field is zero, and if it is not, MUST
* abort the handshake. */
uint8_t renegotiated_connection_len;
- GUARD(s2n_stuffer_read_uint8(extension, &renegotiated_connection_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(extension, &renegotiated_connection_len));
S2N_ERROR_IF(s2n_stuffer_data_available(extension), S2N_ERR_NON_EMPTY_RENEGOTIATION_INFO);
S2N_ERROR_IF(renegotiated_connection_len, S2N_ERR_NON_EMPTY_RENEGOTIATION_INFO);
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
conn->secure_renegotiation = 1;
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_sct_list.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_sct_list.c
index 8a8158aecf..cf28bef52b 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_sct_list.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_sct_list.c
@@ -41,27 +41,27 @@ static bool s2n_server_sct_list_should_send(struct s2n_connection *conn)
int s2n_server_sct_list_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
struct s2n_blob *sct_list = &conn->handshake_params.our_chain_and_key->sct_list;
- notnull_check(sct_list);
- GUARD(s2n_stuffer_write(out, sct_list));
+ POSIX_ENSURE_REF(sct_list);
+ POSIX_GUARD(s2n_stuffer_write(out, sct_list));
return S2N_SUCCESS;
}
int s2n_server_sct_list_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
struct s2n_blob sct_list;
size_t data_available = s2n_stuffer_data_available(extension);
- GUARD(s2n_blob_init(&sct_list,
+ POSIX_GUARD(s2n_blob_init(&sct_list,
s2n_stuffer_raw_read(extension, data_available),
data_available));
- notnull_check(sct_list.data);
+ POSIX_ENSURE_REF(sct_list.data);
- GUARD(s2n_dup(&sct_list, &conn->ct_response));
+ POSIX_GUARD(s2n_dup(&sct_list, &conn->ct_response));
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_server_name.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_server_name.c
index 158a9eae6c..239c84f1fb 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_server_name.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_server_name.c
@@ -33,7 +33,7 @@ const s2n_extension_type s2n_server_server_name_extension = {
static bool s2n_server_name_should_send(struct s2n_connection *conn)
{
- return conn && conn->server_name_used && !s2n_connection_is_session_resumed(conn);
+ return conn && conn->server_name_used && !IS_RESUMPTION_HANDSHAKE(conn);
}
static int s2n_server_name_send(struct s2n_connection *conn, struct s2n_stuffer *out)
@@ -44,7 +44,7 @@ static int s2n_server_name_send(struct s2n_connection *conn, struct s2n_stuffer
static int s2n_server_name_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
/* Read nothing. The extension just needs to exist. */
conn->server_name_used = 1;
return S2N_SUCCESS;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_session_ticket.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_session_ticket.c
index 74875f05a6..d4c9bf019f 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_session_ticket.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_session_ticket.c
@@ -40,7 +40,7 @@ static bool s2n_session_ticket_should_send(struct s2n_connection *conn)
static int s2n_session_ticket_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
/* Read nothing. The extension just needs to exist. */
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
conn->session_ticket_status = S2N_NEW_TICKET;
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_status_request.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_status_request.c
index 7003bf46ba..c73b5c32a4 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_status_request.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_status_request.c
@@ -39,7 +39,7 @@ static bool s2n_server_status_request_should_send(struct s2n_connection *conn)
int s2n_server_status_request_recv(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
/* Read nothing. The extension just needs to exist. */
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
conn->status_type = S2N_STATUS_REQUEST_OCSP;
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_supported_versions.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_supported_versions.c
index cd0fb015dc..53e387d8cc 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_server_supported_versions.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_server_supported_versions.c
@@ -52,8 +52,8 @@ const s2n_extension_type s2n_server_supported_versions_extension = {
static int s2n_server_supported_versions_send(struct s2n_connection *conn, struct s2n_stuffer *out)
{
- GUARD(s2n_stuffer_write_uint8(out, conn->server_protocol_version / 10));
- GUARD(s2n_stuffer_write_uint8(out, conn->server_protocol_version % 10));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, conn->server_protocol_version / 10));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, conn->server_protocol_version % 10));
return S2N_SUCCESS;
}
@@ -61,17 +61,18 @@ static int s2n_server_supported_versions_send(struct s2n_connection *conn, struc
static int s2n_extensions_server_supported_versions_process(struct s2n_connection *conn, struct s2n_stuffer *extension)
{
uint8_t highest_supported_version = conn->client_protocol_version;
- uint8_t minimum_supported_version;
- GUARD(s2n_connection_get_minimum_supported_version(conn, &minimum_supported_version));
+ uint8_t minimum_supported_version = s2n_unknown_protocol_version;
+ POSIX_GUARD_RESULT(s2n_connection_get_minimum_supported_version(conn, &minimum_supported_version));
+ POSIX_ENSURE(highest_supported_version >= minimum_supported_version, S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
uint8_t server_version_parts[S2N_TLS_PROTOCOL_VERSION_LEN];
- GUARD(s2n_stuffer_read_bytes(extension, server_version_parts, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(extension, server_version_parts, S2N_TLS_PROTOCOL_VERSION_LEN));
uint16_t server_version = (server_version_parts[0] * 10) + server_version_parts[1];
- gte_check(server_version, S2N_TLS13);
- lte_check(server_version, highest_supported_version);
- gte_check(server_version, minimum_supported_version);
+ POSIX_ENSURE_GTE(server_version, S2N_TLS13);
+ POSIX_ENSURE_LTE(server_version, highest_supported_version);
+ POSIX_ENSURE_GTE(server_version, minimum_supported_version);
conn->server_protocol_version = server_version;
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.c b/contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.c
index d02b3f920d..3f77789e31 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.c
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.c
@@ -21,11 +21,19 @@
#include "utils/s2n_safety.h"
-int s2n_connection_get_minimum_supported_version(struct s2n_connection *conn, uint8_t *min_version)
+S2N_RESULT s2n_connection_get_minimum_supported_version(struct s2n_connection *conn, uint8_t *min_version)
{
- const struct s2n_security_policy *security_policy;
- GUARD(s2n_connection_get_security_policy(conn, &security_policy));
+ RESULT_ENSURE_REF(min_version);
+
+ const struct s2n_security_policy *security_policy = NULL;
+ RESULT_GUARD_POSIX(s2n_connection_get_security_policy(conn, &security_policy));
+ RESULT_ENSURE_REF(security_policy);
*min_version = security_policy->minimum_protocol_version;
- return 0;
+ /* QUIC requires >= TLS1.3 */
+ if (s2n_connection_is_quic_enabled(conn)) {
+ *min_version = MAX(*min_version, S2N_TLS13);
+ }
+
+ return S2N_RESULT_OK;
}
diff --git a/contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.h b/contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.h
index 613830c347..6c1fdcea76 100644
--- a/contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.h
+++ b/contrib/restricted/aws/s2n/tls/extensions/s2n_supported_versions.h
@@ -18,4 +18,4 @@
#include "tls/s2n_connection.h"
#include "stuffer/s2n_stuffer.h"
-extern int s2n_connection_get_minimum_supported_version(struct s2n_connection *conn, uint8_t *min_version);
+S2N_RESULT s2n_connection_get_minimum_supported_version(struct s2n_connection *conn, uint8_t *min_version);
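
As a worked example of the arithmetic in s2n_extensions_server_supported_versions_process() above: s2n encodes protocol versions as two-digit integers, so the ServerHello supported_versions bytes 0x03 0x04 parse to (3 * 10) + 4 = 34, i.e. S2N_TLS13. The three POSIX_ENSURE_* checks then require that value to be at least S2N_TLS13, no greater than the client's highest offered version, and no lower than the minimum from the security policy (raised to TLS 1.3 when QUIC is enabled):

    uint8_t server_version_parts[2] = { 0x03, 0x04 };
    uint16_t server_version = (server_version_parts[0] * 10) + server_version_parts[1];  /* 34 == S2N_TLS13 */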
diff --git a/contrib/restricted/aws/s2n/tls/s2n_aead.c b/contrib/restricted/aws/s2n/tls/s2n_aead.c
index b7a0b23160..16bbee133b 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_aead.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_aead.c
@@ -18,61 +18,88 @@
#include "utils/s2n_safety.h"
#include "utils/s2n_mem.h"
+#include "tls/s2n_connection.h"
#include "tls/s2n_record.h"
/* Derive the AAD for an AEAD mode cipher suite from the connection state, per
* RFC 5246 section 6.2.3.3 */
-S2N_RESULT s2n_aead_aad_init(const struct s2n_connection *conn, uint8_t * sequence_number, uint8_t content_type, uint16_t record_length, struct s2n_stuffer *ad)
+S2N_RESULT s2n_aead_aad_init(const struct s2n_connection *conn, uint8_t * sequence_number, uint8_t content_type, uint16_t record_length, struct s2n_blob *ad)
{
+ RESULT_ENSURE_REF(ad);
+ RESULT_ENSURE_GTE(ad->size, S2N_TLS_MAX_AAD_LEN);
+
+ uint8_t *data = ad->data;
+ RESULT_GUARD_PTR(data);
+
/* ad = seq_num || record_type || version || length */
- GUARD_AS_RESULT(s2n_stuffer_write_bytes(ad, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
- GUARD_AS_RESULT(s2n_stuffer_write_uint8(ad, content_type));
- GUARD_AS_RESULT(s2n_stuffer_write_uint8(ad, conn->actual_protocol_version / 10));
- GUARD_AS_RESULT(s2n_stuffer_write_uint8(ad, conn->actual_protocol_version % 10));
- GUARD_AS_RESULT(s2n_stuffer_write_uint16(ad, record_length));
+ size_t idx = 0;
+ for(; idx < S2N_TLS_SEQUENCE_NUM_LEN; idx++) {
+ data[idx] = sequence_number[idx];
+ }
+
+ data[idx++] = content_type;
+ data[idx++] = conn->actual_protocol_version / 10;
+ data[idx++] = conn->actual_protocol_version % 10;
+ data[idx++] = record_length >> 8;
+ data[idx++] = record_length & UINT8_MAX;
+
+ /* Double check no overflow */
+ RESULT_ENSURE_LTE(idx, ad->size);
return S2N_RESULT_OK;
}
/* Prepares an AAD (additional authentication data) for a TLS 1.3 AEAD record */
-S2N_RESULT s2n_tls13_aead_aad_init(uint16_t record_length, uint8_t tag_length, struct s2n_stuffer *additional_data)
+S2N_RESULT s2n_tls13_aead_aad_init(uint16_t record_length, uint8_t tag_length, struct s2n_blob *additional_data)
{
- ENSURE_GT(tag_length, 0);
- ENSURE_REF(additional_data);
-
- /*
- * tls1.3 additional_data = opaque_type || legacy_record_version || length
- *
- * https://tools.ietf.org/html/rfc8446#section-5.2
- *
- * opaque_type: The outer opaque_type field of a TLSCiphertext record
- * is always set to the value 23 (application_data) for outward
- * compatibility with middleboxes accustomed to parsing previous
- * versions of TLS. The actual content type of the record is found
- * in TLSInnerPlaintext.type after decryption.
- * legacy_record_version: The legacy_record_version field is always
- * 0x0303. TLS 1.3 TLSCiphertexts are not generated until after
- * TLS 1.3 has been negotiated, so there are no historical
- * compatibility concerns where other values might be received. Note
- * that the handshake protocol, including the ClientHello and
- * ServerHello messages, authenticates the protocol version, so this
- * value is redundant.
- * length: The length (in bytes) of the following
- * TLSCiphertext.encrypted_record, which is the sum of the lengths of
- * the content and the padding, plus one for the inner content type,
- * plus any expansion added by the AEAD algorithm. The length
- * MUST NOT exceed 2^14 + 256 bytes. An endpoint that receives a
- * record that exceeds this length MUST terminate the connection with
- * a "record_overflow" alert.
+ RESULT_ENSURE_GT(tag_length, 0);
+ RESULT_ENSURE_REF(additional_data);
+ RESULT_ENSURE_GTE(additional_data->size, S2N_TLS13_AAD_LEN);
+
+ uint8_t *data = additional_data->data;
+ RESULT_GUARD_PTR(data);
+
+ size_t idx = 0;
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-5.2
+ *# opaque_type: The outer opaque_type field of a TLSCiphertext record
+ *# is always set to the value 23 (application_data) for outward
+ *# compatibility with middleboxes accustomed to parsing previous
+ *# versions of TLS. The actual content type of the record is found
+ *# in TLSInnerPlaintext.type after decryption.
+ **/
+ data[idx++] = TLS_APPLICATION_DATA;
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-5.2
+ *# legacy_record_version: The legacy_record_version field is always
+ *# 0x0303. TLS 1.3 TLSCiphertexts are not generated until after
+ *# TLS 1.3 has been negotiated, so there are no historical
+ *# compatibility concerns where other values might be received. Note
+ *# that the handshake protocol, including the ClientHello and
+ *# ServerHello messages, authenticates the protocol version, so this
+ *# value is redundant.
*/
+ data[idx++] = 0x03;
+ data[idx++] = 0x03;
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-5.2
+ *# length: The length (in bytes) of the following
+ *# TLSCiphertext.encrypted_record, which is the sum of the lengths of
+ *# the content and the padding, plus one for the inner content type,
+ *# plus any expansion added by the AEAD algorithm. The length
+ *# MUST NOT exceed 2^14 + 256 bytes. An endpoint that receives a
+ *# record that exceeds this length MUST terminate the connection with
+ *# a "record_overflow" alert.
+ */
uint16_t length = record_length + tag_length;
- ENSURE(length <= (1 << 14) + 256, S2N_ERR_RECORD_LIMIT);
-
- GUARD_AS_RESULT(s2n_stuffer_write_uint8(additional_data, TLS_APPLICATION_DATA)); /* fixed to 0x17 */
- GUARD_AS_RESULT(s2n_stuffer_write_uint8(additional_data, 3)); /* TLS record layer */
- GUARD_AS_RESULT(s2n_stuffer_write_uint8(additional_data, 3)); /* version fixed at 1.2 (0x0303) */
- GUARD_AS_RESULT(s2n_stuffer_write_uint16(additional_data, length));
+ RESULT_ENSURE(length <= (1 << 14) + 256, S2N_ERR_RECORD_LIMIT);
+ data[idx++] = length >> 8;
+ data[idx++] = length & UINT8_MAX;
+ /* Double check no overflow */
+ RESULT_ENSURE_LTE(idx, additional_data->size);
return S2N_RESULT_OK;
}
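
For a concrete picture of the TLS 1.3 additional_data laid out above: it is always five bytes (S2N_TLS13_AAD_LEN), the fixed opaque_type 0x17, the fixed legacy_record_version 0x03 0x03, and a two-byte big-endian length equal to record_length + tag_length. A worked example assuming record_length = 100 and tag_length = 16:

    /* length = 100 + 16 = 116 = 0x0074, so the AAD bytes are: */
    uint8_t aad[] = { 0x17, 0x03, 0x03, 0x00, 0x74 };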
diff --git a/contrib/restricted/aws/s2n/tls/s2n_alerts.c b/contrib/restricted/aws/s2n/tls/s2n_alerts.c
index 79f14f3214..c745a0d193 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_alerts.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_alerts.c
@@ -27,40 +27,101 @@
#include "utils/s2n_safety.h"
#include "utils/s2n_blob.h"
-#define S2N_TLS_ALERT_CLOSE_NOTIFY 0
-#define S2N_TLS_ALERT_UNEXPECTED_MSG 10
-#define S2N_TLS_ALERT_BAD_RECORD_MAC 20
-#define S2N_TLS_ALERT_DECRYPT_FAILED 21
-#define S2N_TLS_ALERT_RECORD_OVERFLOW 22
-#define S2N_TLS_ALERT_DECOMP_FAILED 30
-#define S2N_TLS_ALERT_HANDSHAKE_FAILURE 40
-#define S2N_TLS_ALERT_NO_CERTIFICATE 41
-#define S2N_TLS_ALERT_BAD_CERTIFICATE 42
-#define S2N_TLS_ALERT_UNSUPPORTED_CERT 43
-#define S2N_TLS_ALERT_CERT_REVOKED 44
-#define S2N_TLS_ALERT_CERT_EXPIRED 45
-#define S2N_TLS_ALERT_CERT_UNKNOWN 46
-#define S2N_TLS_ALERT_ILLEGAL_PARAMETER 47
-#define S2N_TLS_ALERT_UNKNOWN_CA 48
-#define S2N_TLS_ALERT_ACCESS_DENIED 49
-#define S2N_TLS_ALERT_DECODE_ERROR 50
-#define S2N_TLS_ALERT_DECRYPT_ERROR 51
-#define S2N_TLS_ALERT_EXPORT_RESTRICTED 60
-#define S2N_TLS_ALERT_PROTOCOL_VERSION 70
-#define S2N_TLS_ALERT_INSUFFICIENT_SECURITY 71
-#define S2N_TLS_ALERT_INTERNAL_ERROR 80
-#define S2N_TLS_ALERT_USER_CANCELED 90
-#define S2N_TLS_ALERT_NO_RENEGOTIATION 100
-#define S2N_TLS_ALERT_UNSUPPORTED_EXTENSION 110
-
#define S2N_TLS_ALERT_LEVEL_WARNING 1
#define S2N_TLS_ALERT_LEVEL_FATAL 2
+#define S2N_ALERT_CASE(error, alert_code) \
+ case (error): \
+ *alert = (alert_code); \
+ return S2N_RESULT_OK
+
+#define S2N_NO_ALERT(error) \
+ case (error): \
+ RESULT_BAIL(S2N_ERR_NO_ALERT)
+
+static S2N_RESULT s2n_translate_protocol_error_to_alert(int error_code, uint8_t *alert)
+{
+ RESULT_ENSURE_REF(alert);
+
+ switch(error_code) {
+ S2N_ALERT_CASE(S2N_ERR_MISSING_EXTENSION, S2N_TLS_ALERT_MISSING_EXTENSION);
+
+ /* TODO: The ERR_BAD_MESSAGE -> ALERT_UNEXPECTED_MESSAGE mapping
+ * isn't always correct. Sometimes s2n-tls uses ERR_BAD_MESSAGE
+ * to indicate S2N_TLS_ALERT_ILLEGAL_PARAMETER instead.
+ * We'll want to add a new error to distinguish between the two usages:
+ * our errors should be equally or more specific than alerts, not less.
+ */
+ S2N_ALERT_CASE(S2N_ERR_BAD_MESSAGE, S2N_TLS_ALERT_UNEXPECTED_MESSAGE);
+
+ /* TODO: Add mappings for other protocol errors.
+ */
+ S2N_NO_ALERT(S2N_ERR_ENCRYPT);
+ S2N_NO_ALERT(S2N_ERR_DECRYPT);
+ S2N_NO_ALERT(S2N_ERR_KEY_INIT);
+ S2N_NO_ALERT(S2N_ERR_KEY_DESTROY);
+ S2N_NO_ALERT(S2N_ERR_DH_SERIALIZING);
+ S2N_NO_ALERT(S2N_ERR_DH_SHARED_SECRET);
+ S2N_NO_ALERT(S2N_ERR_DH_WRITING_PUBLIC_KEY);
+ S2N_NO_ALERT(S2N_ERR_DH_FAILED_SIGNING);
+ S2N_NO_ALERT(S2N_ERR_DH_COPYING_PARAMETERS);
+ S2N_NO_ALERT(S2N_ERR_DH_GENERATING_PARAMETERS);
+ S2N_NO_ALERT(S2N_ERR_CIPHER_NOT_SUPPORTED);
+ S2N_NO_ALERT(S2N_ERR_NO_APPLICATION_PROTOCOL);
+ S2N_NO_ALERT(S2N_ERR_FALLBACK_DETECTED);
+ S2N_NO_ALERT(S2N_ERR_HASH_DIGEST_FAILED);
+ S2N_NO_ALERT(S2N_ERR_HASH_INIT_FAILED);
+ S2N_NO_ALERT(S2N_ERR_HASH_UPDATE_FAILED);
+ S2N_NO_ALERT(S2N_ERR_HASH_COPY_FAILED);
+ S2N_NO_ALERT(S2N_ERR_HASH_WIPE_FAILED);
+ S2N_NO_ALERT(S2N_ERR_HASH_NOT_READY);
+ S2N_NO_ALERT(S2N_ERR_ALLOW_MD5_FOR_FIPS_FAILED);
+ S2N_NO_ALERT(S2N_ERR_DECODE_CERTIFICATE);
+ S2N_NO_ALERT(S2N_ERR_DECODE_PRIVATE_KEY);
+ S2N_NO_ALERT(S2N_ERR_INVALID_HELLO_RETRY);
+ S2N_NO_ALERT(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
+ S2N_NO_ALERT(S2N_ERR_INVALID_SIGNATURE_SCHEME);
+ S2N_NO_ALERT(S2N_ERR_CBC_VERIFY);
+ S2N_NO_ALERT(S2N_ERR_DH_COPYING_PUBLIC_KEY);
+ S2N_NO_ALERT(S2N_ERR_SIGN);
+ S2N_NO_ALERT(S2N_ERR_VERIFY_SIGNATURE);
+ S2N_NO_ALERT(S2N_ERR_ECDHE_GEN_KEY);
+ S2N_NO_ALERT(S2N_ERR_ECDHE_SHARED_SECRET);
+ S2N_NO_ALERT(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ S2N_NO_ALERT(S2N_ERR_ECDSA_UNSUPPORTED_CURVE);
+ S2N_NO_ALERT(S2N_ERR_ECDHE_SERIALIZING);
+ S2N_NO_ALERT(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
+ S2N_NO_ALERT(S2N_ERR_SHUTDOWN_RECORD_TYPE);
+ S2N_NO_ALERT(S2N_ERR_SHUTDOWN_CLOSED);
+ S2N_NO_ALERT(S2N_ERR_NON_EMPTY_RENEGOTIATION_INFO);
+ S2N_NO_ALERT(S2N_ERR_RECORD_LIMIT);
+ S2N_NO_ALERT(S2N_ERR_CERT_UNTRUSTED);
+ S2N_NO_ALERT(S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ S2N_NO_ALERT(S2N_ERR_INVALID_MAX_FRAG_LEN);
+ S2N_NO_ALERT(S2N_ERR_MAX_FRAG_LEN_MISMATCH);
+ S2N_NO_ALERT(S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
+ S2N_NO_ALERT(S2N_ERR_BAD_KEY_SHARE);
+ S2N_NO_ALERT(S2N_ERR_CANCELLED);
+ S2N_NO_ALERT(S2N_ERR_PROTOCOL_DOWNGRADE_DETECTED);
+ S2N_NO_ALERT(S2N_ERR_MAX_INNER_PLAINTEXT_SIZE);
+ S2N_NO_ALERT(S2N_ERR_RECORD_STUFFER_SIZE);
+ S2N_NO_ALERT(S2N_ERR_FRAGMENT_LENGTH_TOO_LARGE);
+ S2N_NO_ALERT(S2N_ERR_FRAGMENT_LENGTH_TOO_SMALL);
+ S2N_NO_ALERT(S2N_ERR_RECORD_STUFFER_NEEDS_DRAINING);
+ S2N_NO_ALERT(S2N_ERR_UNSUPPORTED_EXTENSION);
+ S2N_NO_ALERT(S2N_ERR_DUPLICATE_EXTENSION);
+ S2N_NO_ALERT(S2N_ERR_MAX_EARLY_DATA_SIZE);
+ S2N_NO_ALERT(S2N_ERR_EARLY_DATA_TRIAL_DECRYPT);
+ }
+
+ RESULT_BAIL(S2N_ERR_UNIMPLEMENTED);
+}
+
static bool s2n_alerts_supported(struct s2n_connection *conn)
{
/* If running in QUIC mode, QUIC handles alerting.
* S2N should not send or receive alerts. */
- return conn && conn->config && !conn->config->quic_enabled;
+ return !s2n_connection_is_quic_enabled(conn);
}
static bool s2n_handle_as_warning(struct s2n_connection *conn, uint8_t level, uint8_t type)
@@ -78,12 +139,38 @@ static bool s2n_handle_as_warning(struct s2n_connection *conn, uint8_t level, ui
return type == S2N_TLS_ALERT_USER_CANCELED;
}
+int s2n_error_get_alert(int error, uint8_t *alert)
+{
+ int error_type = s2n_error_get_type(error);
+
+ POSIX_ENSURE_REF(alert);
+
+ switch(error_type) {
+ case S2N_ERR_T_OK:
+ case S2N_ERR_T_CLOSED:
+ case S2N_ERR_T_BLOCKED:
+ case S2N_ERR_T_USAGE:
+ case S2N_ERR_T_ALERT:
+ POSIX_BAIL(S2N_ERR_NO_ALERT);
+ break;
+ case S2N_ERR_T_PROTO:
+ POSIX_GUARD_RESULT(s2n_translate_protocol_error_to_alert(error, alert));
+ break;
+ case S2N_ERR_T_IO:
+ case S2N_ERR_T_INTERNAL:
+ *alert = S2N_TLS_ALERT_INTERNAL_ERROR;
+ break;
+ }
+
+ return S2N_SUCCESS;
+}
+
int s2n_process_alert_fragment(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
S2N_ERROR_IF(s2n_stuffer_data_available(&conn->in) == 0, S2N_ERR_BAD_MESSAGE);
S2N_ERROR_IF(s2n_stuffer_data_available(&conn->alert_in) == 2, S2N_ERR_ALERT_PRESENT);
- ENSURE_POSIX(s2n_alerts_supported(conn), S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(s2n_alerts_supported(conn), S2N_ERR_BAD_MESSAGE);
while (s2n_stuffer_data_available(&conn->in)) {
uint8_t bytes_required = 2;
@@ -95,19 +182,20 @@ int s2n_process_alert_fragment(struct s2n_connection *conn)
int bytes_to_read = MIN(bytes_required, s2n_stuffer_data_available(&conn->in));
- GUARD(s2n_stuffer_copy(&conn->in, &conn->alert_in, bytes_to_read));
+ POSIX_GUARD(s2n_stuffer_copy(&conn->in, &conn->alert_in, bytes_to_read));
if (s2n_stuffer_data_available(&conn->alert_in) == 2) {
/* Close notifications are handled as shutdowns */
if (conn->alert_in_data[1] == S2N_TLS_ALERT_CLOSE_NOTIFY) {
conn->closed = 1;
+ conn->close_notify_received = true;
return 0;
}
/* Ignore warning-level alerts if we're in warning-tolerant mode */
if (s2n_handle_as_warning(conn, conn->alert_in_data[0], conn->alert_in_data[1])) {
- GUARD(s2n_stuffer_wipe(&conn->alert_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->alert_in));
return 0;
}
@@ -118,7 +206,7 @@ int s2n_process_alert_fragment(struct s2n_connection *conn)
/* All other alerts are treated as fatal errors */
conn->closed = 1;
- S2N_ERROR(S2N_ERR_ALERT);
+ POSIX_BAIL(S2N_ERR_ALERT);
}
}
@@ -127,7 +215,7 @@ int s2n_process_alert_fragment(struct s2n_connection *conn)
int s2n_queue_writer_close_alert_warning(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
uint8_t alert[2];
alert[0] = S2N_TLS_ALERT_LEVEL_WARNING;
@@ -144,7 +232,7 @@ int s2n_queue_writer_close_alert_warning(struct s2n_connection *conn)
return S2N_SUCCESS;
}
- GUARD(s2n_stuffer_write(&conn->writer_alert_out, &out));
+ POSIX_GUARD(s2n_stuffer_write(&conn->writer_alert_out, &out));
conn->close_notify_queued = 1;
return S2N_SUCCESS;
@@ -152,7 +240,7 @@ int s2n_queue_writer_close_alert_warning(struct s2n_connection *conn)
static int s2n_queue_reader_alert(struct s2n_connection *conn, uint8_t level, uint8_t error_code)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
uint8_t alert[2];
alert[0] = level;
@@ -169,7 +257,7 @@ static int s2n_queue_reader_alert(struct s2n_connection *conn, uint8_t level, ui
return S2N_SUCCESS;
}
- GUARD(s2n_stuffer_write(&conn->reader_alert_out, &out));
+ POSIX_GUARD(s2n_stuffer_write(&conn->reader_alert_out, &out));
return S2N_SUCCESS;
}
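
The new s2n_error_get_alert() is a public entry point: given an s2n error code it reports the TLS alert that would describe it, or fails with S2N_ERR_NO_ALERT when no mapping exists (the S2N_NO_ALERT cases above). A hedged usage sketch, assuming a conn and blocked variable from the usual negotiation loop:

    uint8_t alert = 0;
    if (s2n_negotiate(conn, &blocked) != S2N_SUCCESS
            && s2n_error_get_alert(s2n_errno, &alert) == S2N_SUCCESS) {
        /* e.g. S2N_ERR_MISSING_EXTENSION maps to missing_extension(109) */
    }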
diff --git a/contrib/restricted/aws/s2n/tls/s2n_alerts.h b/contrib/restricted/aws/s2n/tls/s2n_alerts.h
index 694c64068c..da735db9d8 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_alerts.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_alerts.h
@@ -19,6 +19,84 @@
#include "tls/s2n_connection.h"
+typedef enum {
+ /*
+ *= https://tools.ietf.org/rfc/rfc8446#section-6
+ *# enum {
+ *# close_notify(0),
+ *# unexpected_message(10),
+ *# bad_record_mac(20),
+ *# record_overflow(22),
+ *# handshake_failure(40),
+ */
+ S2N_TLS_ALERT_CLOSE_NOTIFY = 0,
+ S2N_TLS_ALERT_UNEXPECTED_MESSAGE = 10,
+ S2N_TLS_ALERT_BAD_RECORD_MAC = 20,
+ S2N_TLS_ALERT_RECORD_OVERFLOW = 22,
+ S2N_TLS_ALERT_HANDSHAKE_FAILURE = 40,
+ /*
+ *= https://tools.ietf.org/rfc/rfc8446#section-6
+ *# bad_certificate(42),
+ *# unsupported_certificate(43),
+ *# certificate_revoked(44),
+ *# certificate_expired(45),
+ *# certificate_unknown(46),
+ */
+ S2N_TLS_ALERT_BAD_CERTIFICATE = 42,
+ S2N_TLS_ALERT_UNSUPPORTED_CERTIFICATE = 43,
+ S2N_TLS_ALERT_CERTIFICATE_REVOKED = 44,
+ S2N_TLS_ALERT_CERTIFICATE_EXPIRED = 45,
+ S2N_TLS_ALERT_CERTIFICATE_UNKNOWN = 46,
+ /*
+ *= https://tools.ietf.org/rfc/rfc8446#section-6
+ *# illegal_parameter(47),
+ *# unknown_ca(48),
+ *# access_denied(49),
+ *# decode_error(50),
+ *# decrypt_error(51),
+ */
+ S2N_TLS_ALERT_ILLEGAL_PARAMETER = 47,
+ S2N_TLS_ALERT_UNKNOWN_CA = 48,
+ S2N_TLS_ALERT_ACCESS_DENIED = 49,
+ S2N_TLS_ALERT_DECODE_ERROR = 50,
+ S2N_TLS_ALERT_DECRYPT_ERROR = 51,
+ /*
+ *= https://tools.ietf.org/rfc/rfc8446#section-6
+ *# protocol_version(70),
+ *# insufficient_security(71),
+ *# internal_error(80),
+ *# inappropriate_fallback(86),
+ *# user_canceled(90),
+ */
+ S2N_TLS_ALERT_PROTOCOL_VERSION = 70,
+ S2N_TLS_ALERT_INSUFFICIENT_SECURITY = 71,
+ S2N_TLS_ALERT_INTERNAL_ERROR = 80,
+ S2N_TLS_ALERT_INAPPROPRIATE_FALLBACK = 86,
+ S2N_TLS_ALERT_USER_CANCELED = 90,
+ /*
+ *= https://tools.ietf.org/rfc/rfc8446#section-6
+ *# missing_extension(109),
+ *# unsupported_extension(110),
+ *# unrecognized_name(112),
+ *# bad_certificate_status_response(113),
+ *# unknown_psk_identity(115),
+ */
+ S2N_TLS_ALERT_MISSING_EXTENSION = 109,
+ S2N_TLS_ALERT_UNSUPPORTED_EXTENSION = 110,
+ S2N_TLS_ALERT_UNRECOGNIZED_NAME = 112,
+ S2N_TLS_ALERT_BAD_CERTIFICATE_STATUS_RESPONSE = 113,
+ S2N_TLS_ALERT_UNKNOWN_PSK_IDENTITY = 115,
+ /*
+ *= https://tools.ietf.org/rfc/rfc8446#section-6
+ *# certificate_required(116),
+ *# no_application_protocol(120),
+ *# (255)
+ *# } AlertDescription;
+ */
+ S2N_TLS_ALERT_CERTIFICATE_REQUIRED = 116,
+ S2N_TLS_ALERT_NO_APPLICATION_PROTOCOL = 120,
+} s2n_tls_alert_code;
+
extern int s2n_process_alert_fragment(struct s2n_connection *conn);
extern int s2n_queue_writer_close_alert_warning(struct s2n_connection *conn);
extern int s2n_queue_reader_unsupported_protocol_version_alert(struct s2n_connection *conn);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_async_pkey.c b/contrib/restricted/aws/s2n/tls/s2n_async_pkey.c
index d1960eac69..062324db57 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_async_pkey.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_async_pkey.c
@@ -17,7 +17,7 @@
#include "crypto/s2n_hash.h"
#include "crypto/s2n_signature.h"
#include "error/s2n_errno.h"
-#include "s2n.h"
+#include "api/s2n.h"
#include "tls/s2n_connection.h"
#include "tls/s2n_handshake.h"
#include "utils/s2n_blob.h"
@@ -25,8 +25,6 @@
#include "utils/s2n_result.h"
#include "utils/s2n_safety.h"
-typedef enum { S2N_ASYNC_DECRYPT, S2N_ASYNC_SIGN } s2n_async_pkey_op_type;
-
struct s2n_async_pkey_decrypt_data {
s2n_async_pkey_decrypt_complete on_complete;
struct s2n_blob encrypted;
@@ -44,6 +42,7 @@ struct s2n_async_pkey_sign_data {
struct s2n_async_pkey_op {
s2n_async_pkey_op_type type;
struct s2n_connection *conn;
+ s2n_async_pkey_validation_mode validation_mode;
unsigned complete : 1;
unsigned applied : 1;
union {
@@ -55,6 +54,9 @@ struct s2n_async_pkey_op {
struct s2n_async_pkey_op_actions {
S2N_RESULT (*perform)(struct s2n_async_pkey_op *op, s2n_cert_private_key *pkey);
S2N_RESULT (*apply)(struct s2n_async_pkey_op *op, struct s2n_connection *conn);
+ S2N_RESULT (*get_input_size)(struct s2n_async_pkey_op *op, uint32_t *data_len);
+ S2N_RESULT (*get_input)(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len);
+ S2N_RESULT (*set_output)(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len);
S2N_RESULT (*free)(struct s2n_async_pkey_op *op);
};
@@ -76,25 +78,37 @@ static S2N_RESULT s2n_async_pkey_decrypt_sync(struct s2n_connection *conn, struc
static S2N_RESULT s2n_async_pkey_decrypt_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key *pkey);
static S2N_RESULT s2n_async_pkey_decrypt_apply(struct s2n_async_pkey_op *op, struct s2n_connection *conn);
+static S2N_RESULT s2n_async_pkey_get_input_size_decrypt(struct s2n_async_pkey_op *op, uint32_t *data_len);
+static S2N_RESULT s2n_async_pkey_get_input_decrypt(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len);
+static S2N_RESULT s2n_async_pkey_op_set_output_decrypt(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len);
static S2N_RESULT s2n_async_pkey_decrypt_free(struct s2n_async_pkey_op *op);
static S2N_RESULT s2n_async_pkey_sign_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key *pkey);
static S2N_RESULT s2n_async_pkey_sign_apply(struct s2n_async_pkey_op *op, struct s2n_connection *conn);
+static S2N_RESULT s2n_async_pkey_get_input_size_sign(struct s2n_async_pkey_op *op, uint32_t *data_len);
+static S2N_RESULT s2n_async_pkey_get_input_sign(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len);
+static S2N_RESULT s2n_async_pkey_op_set_output_sign(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len);
static S2N_RESULT s2n_async_pkey_sign_free(struct s2n_async_pkey_op *op);
static const struct s2n_async_pkey_op_actions s2n_async_pkey_decrypt_op = { .perform = &s2n_async_pkey_decrypt_perform,
.apply = &s2n_async_pkey_decrypt_apply,
+ .get_input_size = &s2n_async_pkey_get_input_size_decrypt,
+ .get_input = &s2n_async_pkey_get_input_decrypt,
+ .set_output = &s2n_async_pkey_op_set_output_decrypt,
.free = &s2n_async_pkey_decrypt_free };
static const struct s2n_async_pkey_op_actions s2n_async_pkey_sign_op = { .perform = &s2n_async_pkey_sign_perform,
.apply = &s2n_async_pkey_sign_apply,
+ .get_input_size = &s2n_async_pkey_get_input_size_sign,
+ .get_input = &s2n_async_pkey_get_input_sign,
+ .set_output = &s2n_async_pkey_op_set_output_sign,
.free = &s2n_async_pkey_sign_free };
DEFINE_POINTER_CLEANUP_FUNC(struct s2n_async_pkey_op *, s2n_async_pkey_op_free);
static S2N_RESULT s2n_async_get_actions(s2n_async_pkey_op_type type, const struct s2n_async_pkey_op_actions **actions)
{
- ENSURE_REF(actions);
+ RESULT_ENSURE_REF(actions);
switch (type) {
case S2N_ASYNC_DECRYPT:
@@ -111,13 +125,13 @@ static S2N_RESULT s2n_async_get_actions(s2n_async_pkey_op_type type, const struc
static S2N_RESULT s2n_async_pkey_op_allocate(struct s2n_async_pkey_op **op)
{
- ENSURE_REF(op);
- ENSURE(*op == NULL, S2N_ERR_SAFETY);
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE(*op == NULL, S2N_ERR_SAFETY);
/* allocate memory */
DEFER_CLEANUP(struct s2n_blob mem = {0}, s2n_free);
- GUARD_AS_RESULT(s2n_alloc(&mem, sizeof(struct s2n_async_pkey_op)));
- GUARD_AS_RESULT(s2n_blob_zero(&mem));
+ RESULT_GUARD_POSIX(s2n_alloc(&mem, sizeof(struct s2n_async_pkey_op)));
+ RESULT_GUARD_POSIX(s2n_blob_zero(&mem));
*op = (void *) mem.data;
if (s2n_blob_init(&mem, NULL, 0) != S2N_SUCCESS) {
@@ -130,70 +144,82 @@ static S2N_RESULT s2n_async_pkey_op_allocate(struct s2n_async_pkey_op **op)
S2N_RESULT s2n_async_pkey_decrypt(struct s2n_connection *conn, struct s2n_blob *encrypted,
struct s2n_blob *init_decrypted, s2n_async_pkey_decrypt_complete on_complete)
{
- ENSURE_REF(conn);
- ENSURE_REF(encrypted);
- ENSURE_REF(init_decrypted);
- ENSURE_REF(on_complete);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(encrypted);
+ RESULT_ENSURE_REF(init_decrypted);
+ RESULT_ENSURE_REF(on_complete);
if (conn->config->async_pkey_cb) {
- GUARD_RESULT(s2n_async_pkey_decrypt_async(conn, encrypted, init_decrypted, on_complete));
+ RESULT_GUARD(s2n_async_pkey_decrypt_async(conn, encrypted, init_decrypted, on_complete));
} else {
- GUARD_RESULT(s2n_async_pkey_decrypt_sync(conn, encrypted, init_decrypted, on_complete));
+ RESULT_GUARD(s2n_async_pkey_decrypt_sync(conn, encrypted, init_decrypted, on_complete));
}
return S2N_RESULT_OK;
}
+S2N_RESULT s2n_async_cb_execute(struct s2n_connection *conn, struct s2n_async_pkey_op **owned_op)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(owned_op);
+ RESULT_ENSURE(conn->handshake.async_state == S2N_ASYNC_NOT_INVOKED, S2N_ERR_ASYNC_MORE_THAN_ONE);
+
+ /* The callback now owns the operation, meaning we can't free it.
+ * Wipe our version and pass a copy to the callback.
+ */
+ struct s2n_async_pkey_op *unowned_op = *owned_op;
+ ZERO_TO_DISABLE_DEFER_CLEANUP(*owned_op);
+
+ conn->handshake.async_state = S2N_ASYNC_INVOKED;
+ RESULT_ENSURE(conn->config->async_pkey_cb(conn, unowned_op) == S2N_SUCCESS, S2N_ERR_ASYNC_CALLBACK_FAILED);
+
+ /*
+ * If the callback already completed the operation, continue.
+ * Otherwise, we need to block s2n_negotiate and wait for the operation to complete.
+ */
+ if (conn->handshake.async_state == S2N_ASYNC_COMPLETE) {
+ return S2N_RESULT_OK;
+ }
+ RESULT_BAIL(S2N_ERR_ASYNC_BLOCKED);
+}
+
S2N_RESULT s2n_async_pkey_decrypt_async(struct s2n_connection *conn, struct s2n_blob *encrypted,
struct s2n_blob *init_decrypted, s2n_async_pkey_decrypt_complete on_complete)
{
- ENSURE_REF(conn);
- ENSURE_REF(encrypted);
- ENSURE_REF(init_decrypted);
- ENSURE_REF(on_complete);
- ENSURE(conn->handshake.async_state == S2N_ASYNC_NOT_INVOKED, S2N_ERR_ASYNC_MORE_THAN_ONE);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(encrypted);
+ RESULT_ENSURE_REF(init_decrypted);
+ RESULT_ENSURE_REF(on_complete);
DEFER_CLEANUP(struct s2n_async_pkey_op *op = NULL, s2n_async_pkey_op_free_pointer);
- GUARD_RESULT(s2n_async_pkey_op_allocate(&op));
+ RESULT_GUARD(s2n_async_pkey_op_allocate(&op));
op->type = S2N_ASYNC_DECRYPT;
op->conn = conn;
+ op->validation_mode = conn->config->async_pkey_validation_mode;
struct s2n_async_pkey_decrypt_data *decrypt = &op->op.decrypt;
decrypt->on_complete = on_complete;
- GUARD_AS_RESULT(s2n_dup(encrypted, &decrypt->encrypted));
- GUARD_AS_RESULT(s2n_dup(init_decrypted, &decrypt->decrypted));
-
- /* Block the handshake and set async state to invoking to block async states */
- GUARD_AS_RESULT(s2n_conn_set_handshake_read_block(conn));
- conn->handshake.async_state = S2N_ASYNC_INVOKING_CALLBACK;
-
- /* Move op to tmp to avoid DEFER_CLEANUP freeing the op, as it will be owned by callback */
- struct s2n_async_pkey_op *tmp_op = op;
- op = NULL;
-
- ENSURE(conn->config->async_pkey_cb(conn, tmp_op) == S2N_SUCCESS, S2N_ERR_ASYNC_CALLBACK_FAILED);
+ RESULT_GUARD_POSIX(s2n_dup(encrypted, &decrypt->encrypted));
+ RESULT_GUARD_POSIX(s2n_dup(init_decrypted, &decrypt->decrypted));
- /* Set state to waiting to allow op to be consumed by connection */
- conn->handshake.async_state = S2N_ASYNC_INVOKED_WAITING;
-
- /* Return an async blocked error to drop out of s2n_negotiate loop */
- BAIL(S2N_ERR_ASYNC_BLOCKED);
+ RESULT_GUARD(s2n_async_cb_execute(conn, &op));
+ return S2N_RESULT_OK;
}
S2N_RESULT s2n_async_pkey_decrypt_sync(struct s2n_connection *conn, struct s2n_blob *encrypted,
struct s2n_blob *init_decrypted, s2n_async_pkey_decrypt_complete on_complete)
{
- ENSURE_REF(conn);
- ENSURE_REF(encrypted);
- ENSURE_REF(init_decrypted);
- ENSURE_REF(on_complete);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(encrypted);
+ RESULT_ENSURE_REF(init_decrypted);
+ RESULT_ENSURE_REF(on_complete);
const struct s2n_pkey *pkey = conn->handshake_params.our_chain_and_key->private_key;
bool rsa_failed = s2n_pkey_decrypt(pkey, encrypted, init_decrypted) != S2N_SUCCESS;
- GUARD_AS_RESULT(on_complete(conn, rsa_failed, init_decrypted));
+ RESULT_GUARD_POSIX(on_complete(conn, rsa_failed, init_decrypted));
return S2N_RESULT_OK;
}
@@ -201,14 +227,14 @@ S2N_RESULT s2n_async_pkey_decrypt_sync(struct s2n_connection *conn, struct s2n_b
S2N_RESULT s2n_async_pkey_sign(struct s2n_connection *conn, s2n_signature_algorithm sig_alg,
struct s2n_hash_state *digest, s2n_async_pkey_sign_complete on_complete)
{
- ENSURE_REF(conn);
- ENSURE_REF(digest);
- ENSURE_REF(on_complete);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(digest);
+ RESULT_ENSURE_REF(on_complete);
if (conn->config->async_pkey_cb) {
- GUARD_RESULT(s2n_async_pkey_sign_async(conn, sig_alg, digest, on_complete));
+ RESULT_GUARD(s2n_async_pkey_sign_async(conn, sig_alg, digest, on_complete));
} else {
- GUARD_RESULT(s2n_async_pkey_sign_sync(conn, sig_alg, digest, on_complete));
+ RESULT_GUARD(s2n_async_pkey_sign_sync(conn, sig_alg, digest, on_complete));
}
return S2N_RESULT_OK;
@@ -217,73 +243,60 @@ S2N_RESULT s2n_async_pkey_sign(struct s2n_connection *conn, s2n_signature_algori
S2N_RESULT s2n_async_pkey_sign_async(struct s2n_connection *conn, s2n_signature_algorithm sig_alg,
struct s2n_hash_state *digest, s2n_async_pkey_sign_complete on_complete)
{
- ENSURE_REF(conn);
- ENSURE_REF(digest);
- ENSURE_REF(on_complete);
- ENSURE(conn->handshake.async_state == S2N_ASYNC_NOT_INVOKED, S2N_ERR_ASYNC_MORE_THAN_ONE);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(digest);
+ RESULT_ENSURE_REF(on_complete);
DEFER_CLEANUP(struct s2n_async_pkey_op *op = NULL, s2n_async_pkey_op_free_pointer);
- GUARD_RESULT(s2n_async_pkey_op_allocate(&op));
+ RESULT_GUARD(s2n_async_pkey_op_allocate(&op));
op->type = S2N_ASYNC_SIGN;
op->conn = conn;
+ op->validation_mode = conn->config->async_pkey_validation_mode;
struct s2n_async_pkey_sign_data *sign = &op->op.sign;
sign->on_complete = on_complete;
sign->sig_alg = sig_alg;
- GUARD_AS_RESULT(s2n_hash_new(&sign->digest));
- GUARD_AS_RESULT(s2n_hash_copy(&sign->digest, digest));
-
- /* Block the handshake and set async state to invoking to block async states */
- GUARD_AS_RESULT(s2n_conn_set_handshake_read_block(conn));
- conn->handshake.async_state = S2N_ASYNC_INVOKING_CALLBACK;
-
- /* Move op to tmp to avoid DEFER_CLEANUP freeing the op, as it will be owned by callback */
- struct s2n_async_pkey_op *tmp_op = op;
- op = NULL;
+ RESULT_GUARD_POSIX(s2n_hash_new(&sign->digest));
+ RESULT_GUARD_POSIX(s2n_hash_copy(&sign->digest, digest));
- ENSURE(conn->config->async_pkey_cb(conn, tmp_op) == S2N_SUCCESS, S2N_ERR_ASYNC_CALLBACK_FAILED);
-
- /* Set state to waiting to allow op to be consumed by connection */
- conn->handshake.async_state = S2N_ASYNC_INVOKED_WAITING;
-
- /* Return an async blocked error to drop out of s2n_negotiate loop */
- BAIL(S2N_ERR_ASYNC_BLOCKED);
+ RESULT_GUARD(s2n_async_cb_execute(conn, &op));
+ return S2N_RESULT_OK;
}
S2N_RESULT s2n_async_pkey_sign_sync(struct s2n_connection *conn, s2n_signature_algorithm sig_alg,
struct s2n_hash_state *digest, s2n_async_pkey_sign_complete on_complete)
{
- ENSURE_REF(conn);
- ENSURE_REF(digest);
- ENSURE_REF(on_complete);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(digest);
+ RESULT_ENSURE_REF(on_complete);
const struct s2n_pkey *pkey = conn->handshake_params.our_chain_and_key->private_key;
DEFER_CLEANUP(struct s2n_blob signed_content = { 0 }, s2n_free);
uint32_t maximum_signature_length = 0;
- GUARD_RESULT(s2n_pkey_size(pkey, &maximum_signature_length));
- GUARD_AS_RESULT(s2n_alloc(&signed_content, maximum_signature_length));
+ RESULT_GUARD(s2n_pkey_size(pkey, &maximum_signature_length));
+ RESULT_GUARD_POSIX(s2n_alloc(&signed_content, maximum_signature_length));
- GUARD_AS_RESULT(s2n_pkey_sign(pkey, sig_alg, digest, &signed_content));
+ RESULT_GUARD_POSIX(s2n_pkey_sign(pkey, sig_alg, digest, &signed_content));
- GUARD_AS_RESULT(on_complete(conn, &signed_content));
+ RESULT_GUARD_POSIX(on_complete(conn, &signed_content));
return S2N_RESULT_OK;
}
int s2n_async_pkey_op_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key *key)
{
- ENSURE_POSIX_REF(op);
- ENSURE_POSIX_REF(key);
- ENSURE_POSIX(!op->complete, S2N_ERR_ASYNC_ALREADY_PERFORMED);
+ POSIX_ENSURE_REF(op);
+ POSIX_ENSURE_REF(key);
+ POSIX_ENSURE(!op->complete, S2N_ERR_ASYNC_ALREADY_PERFORMED);
const struct s2n_async_pkey_op_actions *actions = NULL;
- GUARD_AS_POSIX(s2n_async_get_actions(op->type, &actions));
- notnull_check(actions);
+ POSIX_GUARD_RESULT(s2n_async_get_actions(op->type, &actions));
+ POSIX_ENSURE_REF(actions);
- GUARD_AS_POSIX(actions->perform(op, key));
+ POSIX_GUARD_RESULT(actions->perform(op, key));
op->complete = true;
@@ -292,52 +305,51 @@ int s2n_async_pkey_op_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key
int s2n_async_pkey_op_apply(struct s2n_async_pkey_op *op, struct s2n_connection *conn)
{
- ENSURE_POSIX_REF(op);
- ENSURE_POSIX_REF(conn);
- ENSURE_POSIX(op->complete, S2N_ERR_ASYNC_NOT_PERFORMED);
- ENSURE_POSIX(!op->applied, S2N_ERR_ASYNC_ALREADY_APPLIED);
+ POSIX_ENSURE_REF(op);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE(op->complete, S2N_ERR_ASYNC_NOT_PERFORMED);
+ POSIX_ENSURE(!op->applied, S2N_ERR_ASYNC_ALREADY_APPLIED);
/* We could have just used op->conn and removed the conn argument, but we want the caller
* to be explicit about which connection it wants to resume. This also gives more
* protection in case the caller frees the connection object and then tries to resume
* the connection. */
- ENSURE_POSIX(op->conn == conn, S2N_ERR_ASYNC_WRONG_CONNECTION);
- ENSURE_POSIX(conn->handshake.async_state != S2N_ASYNC_INVOKING_CALLBACK, S2N_ERR_ASYNC_APPLY_WHILE_INVOKING);
- ENSURE_POSIX(conn->handshake.async_state == S2N_ASYNC_INVOKED_WAITING, S2N_ERR_ASYNC_WRONG_CONNECTION);
+ POSIX_ENSURE(op->conn == conn, S2N_ERR_ASYNC_WRONG_CONNECTION);
+ POSIX_ENSURE(conn->handshake.async_state == S2N_ASYNC_INVOKED, S2N_ERR_ASYNC_WRONG_CONNECTION);
const struct s2n_async_pkey_op_actions *actions = NULL;
- GUARD_AS_POSIX(s2n_async_get_actions(op->type, &actions));
- notnull_check(actions);
+ POSIX_GUARD_RESULT(s2n_async_get_actions(op->type, &actions));
+ POSIX_ENSURE_REF(actions);
- GUARD_AS_POSIX(actions->apply(op, conn));
+ POSIX_GUARD_RESULT(actions->apply(op, conn));
op->applied = true;
- conn->handshake.async_state = S2N_ASYNC_INVOKED_COMPLETE;
+ conn->handshake.async_state = S2N_ASYNC_COMPLETE;
/* Free up the decrypt/sign structs to avoid storing secrets for too long */
- GUARD_AS_POSIX(actions->free(op));
+ POSIX_GUARD_RESULT(actions->free(op));
return S2N_SUCCESS;
}
int s2n_async_pkey_op_free(struct s2n_async_pkey_op *op)
{
- ENSURE_POSIX_REF(op);
+ POSIX_ENSURE_REF(op);
const struct s2n_async_pkey_op_actions *actions = NULL;
- GUARD_AS_POSIX(s2n_async_get_actions(op->type, &actions));
- notnull_check(actions);
+ POSIX_GUARD_RESULT(s2n_async_get_actions(op->type, &actions));
+ POSIX_ENSURE_REF(actions);
/* If applied, the decrypt/sign structs were already released in the apply call */
- if (!op->applied) { GUARD_AS_POSIX(actions->free(op)); }
+ if (!op->applied) { POSIX_GUARD_RESULT(actions->free(op)); }
- GUARD_POSIX(s2n_free_object(( uint8_t ** )&op, sizeof(struct s2n_async_pkey_op)));
+ POSIX_GUARD(s2n_free_object(( uint8_t ** )&op, sizeof(struct s2n_async_pkey_op)));
return S2N_SUCCESS;
}
S2N_RESULT s2n_async_pkey_decrypt_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key *pkey)
{
- ENSURE_REF(op);
- ENSURE_REF(pkey);
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(pkey);
struct s2n_async_pkey_decrypt_data *decrypt = &op->op.decrypt;
@@ -348,66 +360,259 @@ S2N_RESULT s2n_async_pkey_decrypt_perform(struct s2n_async_pkey_op *op, s2n_cert
S2N_RESULT s2n_async_pkey_decrypt_apply(struct s2n_async_pkey_op *op, struct s2n_connection *conn)
{
- ENSURE_REF(op);
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(conn);
struct s2n_async_pkey_decrypt_data *decrypt = &op->op.decrypt;
- GUARD_AS_RESULT(decrypt->on_complete(conn, decrypt->rsa_failed, &decrypt->decrypted));
+ RESULT_GUARD_POSIX(decrypt->on_complete(conn, decrypt->rsa_failed, &decrypt->decrypted));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_async_pkey_decrypt_free(struct s2n_async_pkey_op *op)
{
- ENSURE_REF(op);
+ RESULT_ENSURE_REF(op);
struct s2n_async_pkey_decrypt_data *decrypt = &op->op.decrypt;
- GUARD_AS_RESULT(s2n_blob_zero(&decrypt->decrypted));
- GUARD_AS_RESULT(s2n_blob_zero(&decrypt->encrypted));
- GUARD_AS_RESULT(s2n_free(&decrypt->decrypted));
- GUARD_AS_RESULT(s2n_free(&decrypt->encrypted));
+ RESULT_GUARD_POSIX(s2n_blob_zero(&decrypt->decrypted));
+ RESULT_GUARD_POSIX(s2n_blob_zero(&decrypt->encrypted));
+ RESULT_GUARD_POSIX(s2n_free(&decrypt->decrypted));
+ RESULT_GUARD_POSIX(s2n_free(&decrypt->encrypted));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_async_pkey_sign_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key *pkey)
{
- ENSURE_REF(op);
- ENSURE_REF(pkey);
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(op->conn);
+ RESULT_ENSURE_REF(op->conn->config);
+ RESULT_ENSURE_REF(pkey);
struct s2n_async_pkey_sign_data *sign = &op->op.sign;
uint32_t maximum_signature_length = 0;
- GUARD_RESULT(s2n_pkey_size(pkey, &maximum_signature_length));
- GUARD_AS_RESULT(s2n_alloc(&sign->signature, maximum_signature_length));
+ RESULT_GUARD(s2n_pkey_size(pkey, &maximum_signature_length));
+ RESULT_GUARD_POSIX(s2n_alloc(&sign->signature, maximum_signature_length));
- GUARD_AS_RESULT(s2n_pkey_sign(pkey, sign->sig_alg, &sign->digest, &sign->signature));
+ /* If the validation mode is S2N_ASYNC_PKEY_VALIDATION_STRICT,
+ * use a local copy of the hash state to compute the signature */
+ if (op->validation_mode == S2N_ASYNC_PKEY_VALIDATION_STRICT) {
+ DEFER_CLEANUP(struct s2n_hash_state hash_state_copy, s2n_hash_free);
+ RESULT_GUARD_POSIX(s2n_hash_new(&hash_state_copy));
+ RESULT_GUARD_POSIX(s2n_hash_copy(&hash_state_copy, &sign->digest));
+
+ RESULT_GUARD_POSIX(s2n_pkey_sign(pkey, sign->sig_alg, &hash_state_copy, &sign->signature));
+ } else {
+ RESULT_GUARD_POSIX(s2n_pkey_sign(pkey, sign->sig_alg, &sign->digest, &sign->signature));
+ }
return S2N_RESULT_OK;
}
S2N_RESULT s2n_async_pkey_sign_apply(struct s2n_async_pkey_op *op, struct s2n_connection *conn)
{
- ENSURE_REF(op);
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(conn);
struct s2n_async_pkey_sign_data *sign = &op->op.sign;
- GUARD_AS_RESULT(sign->on_complete(conn, &sign->signature));
+ /* Perform signature validation only if the caller opted in to validation */
+ if (op->validation_mode == S2N_ASYNC_PKEY_VALIDATION_STRICT) {
+ RESULT_GUARD(s2n_async_pkey_verify_signature(conn, sign->sig_alg, &sign->digest, &sign->signature));
+ }
+
+ RESULT_GUARD_POSIX(sign->on_complete(conn, &sign->signature));
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_async_pkey_verify_signature(struct s2n_connection *conn, s2n_signature_algorithm sig_alg,
+ struct s2n_hash_state *digest, struct s2n_blob *signature) {
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->handshake_params.our_chain_and_key);
+ RESULT_ENSURE_REF(digest);
+ RESULT_ENSURE_REF(signature);
+
+ /* Parse public key for the cert */
+ DEFER_CLEANUP(struct s2n_pkey public_key = {0}, s2n_pkey_free);
+ s2n_pkey_type pkey_type = S2N_PKEY_TYPE_UNKNOWN;
+ RESULT_GUARD_POSIX(s2n_asn1der_to_public_key_and_type(&public_key, &pkey_type,
+ &conn->handshake_params.our_chain_and_key->cert_chain->head->raw));
+ RESULT_ENSURE(s2n_pkey_verify(&public_key, sig_alg, digest, signature) == S2N_SUCCESS, S2N_ERR_VERIFY_SIGNATURE);
return S2N_RESULT_OK;
}
S2N_RESULT s2n_async_pkey_sign_free(struct s2n_async_pkey_op *op)
{
- ENSURE_REF(op);
+ RESULT_ENSURE_REF(op);
+
+ struct s2n_async_pkey_sign_data *sign = &op->op.sign;
+
+ RESULT_GUARD_POSIX(s2n_hash_free(&sign->digest));
+ RESULT_GUARD_POSIX(s2n_free(&sign->signature));
+
+ return S2N_RESULT_OK;
+}
+
+int s2n_async_pkey_op_set_validation_mode(struct s2n_async_pkey_op *op, s2n_async_pkey_validation_mode mode)
+{
+ POSIX_ENSURE_REF(op);
+
+ switch (mode) {
+ case S2N_ASYNC_PKEY_VALIDATION_FAST:
+ case S2N_ASYNC_PKEY_VALIDATION_STRICT:
+ op->validation_mode = mode;
+ return S2N_SUCCESS;
+ }
+
+ POSIX_BAIL(S2N_ERR_INVALID_ARGUMENT);
+}
+
+int s2n_async_pkey_op_get_op_type(struct s2n_async_pkey_op *op, s2n_async_pkey_op_type *type)
+{
+ POSIX_ENSURE_REF(op);
+ POSIX_ENSURE_REF(type);
+
+ *type = op->type;
+
+ return S2N_SUCCESS;
+}
+
+int s2n_async_pkey_op_get_input_size(struct s2n_async_pkey_op *op, uint32_t *data_len)
+{
+ POSIX_ENSURE_REF(op);
+ POSIX_ENSURE_REF(data_len);
+
+ const struct s2n_async_pkey_op_actions *actions = NULL;
+ POSIX_GUARD_RESULT(s2n_async_get_actions(op->type, &actions));
+ POSIX_ENSURE_REF(actions);
+
+ POSIX_GUARD_RESULT(actions->get_input_size(op, data_len));
+
+ return S2N_SUCCESS;
+}
+
+static S2N_RESULT s2n_async_pkey_get_input_size_decrypt(struct s2n_async_pkey_op *op, uint32_t *data_len)
+{
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(data_len);
+
+ struct s2n_async_pkey_decrypt_data *decrypt = &op->op.decrypt;
+ struct s2n_blob *in = &decrypt->encrypted;
+
+ *data_len = in->size;
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_async_pkey_get_input_size_sign(struct s2n_async_pkey_op *op, uint32_t *data_len)
+{
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(data_len);
+
+ struct s2n_async_pkey_sign_data *sign = &op->op.sign;
+ struct s2n_hash_state *digest = &sign->digest;
+
+ uint8_t digest_length = 0;
+ RESULT_GUARD_POSIX(s2n_hash_digest_size(digest->alg, &digest_length));
+
+ *data_len = digest_length;
+
+ return S2N_RESULT_OK;
+}
+
+int s2n_async_pkey_op_get_input(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len)
+{
+ POSIX_ENSURE_REF(op);
+ POSIX_ENSURE_REF(data);
+
+ const struct s2n_async_pkey_op_actions *actions = NULL;
+ POSIX_GUARD_RESULT(s2n_async_get_actions(op->type, &actions));
+ POSIX_ENSURE_REF(actions);
+
+ POSIX_GUARD_RESULT(actions->get_input(op, data, data_len));
+
+ return S2N_SUCCESS;
+}
+
+static S2N_RESULT s2n_async_pkey_get_input_decrypt(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len)
+{
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(data);
+
+ struct s2n_async_pkey_decrypt_data *decrypt = &op->op.decrypt;
+ struct s2n_blob *in = &decrypt->encrypted;
+
+ RESULT_ENSURE_LTE(in->size, data_len);
+
+ RESULT_CHECKED_MEMCPY(data, in->data, in->size);
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_async_pkey_get_input_sign(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len)
+{
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(data);
+
+ struct s2n_async_pkey_sign_data *sign = &op->op.sign;
+
+ DEFER_CLEANUP(struct s2n_hash_state digest_copy = { 0 }, s2n_hash_free);
+ RESULT_GUARD_POSIX(s2n_hash_new(&digest_copy));
+ RESULT_GUARD_POSIX(s2n_hash_copy(&digest_copy, &sign->digest));
+
+ uint8_t digest_length = 0;
+
+ RESULT_GUARD_POSIX(s2n_hash_digest_size(digest_copy.alg, &digest_length));
+
+ RESULT_ENSURE_LTE(digest_length, data_len);
+ RESULT_GUARD_POSIX(s2n_hash_digest(&digest_copy, data, digest_length));
+
+ return S2N_RESULT_OK;
+}
+
+int s2n_async_pkey_op_set_output(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len)
+{
+ POSIX_ENSURE_REF(op);
+ POSIX_ENSURE_REF(data);
+
+ const struct s2n_async_pkey_op_actions *actions = NULL;
+ POSIX_GUARD_RESULT(s2n_async_get_actions(op->type, &actions));
+ POSIX_ENSURE_REF(actions);
+
+ POSIX_GUARD_RESULT(actions->set_output(op, data, data_len));
+ op->complete = true;
+
+ return S2N_SUCCESS;
+}
+
+static S2N_RESULT s2n_async_pkey_op_set_output_decrypt(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len)
+{
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(data);
+
+ struct s2n_async_pkey_decrypt_data *decrypt = &op->op.decrypt;
+ struct s2n_blob *out = &decrypt->decrypted;
+
+ RESULT_GUARD_POSIX(s2n_realloc(out, data_len));
+ RESULT_CHECKED_MEMCPY(out->data, data, data_len);
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_async_pkey_op_set_output_sign(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len)
+{
+ RESULT_ENSURE_REF(op);
+ RESULT_ENSURE_REF(data);
struct s2n_async_pkey_sign_data *sign = &op->op.sign;
+ struct s2n_blob *sigcopy = &sign->signature;
- GUARD_AS_RESULT(s2n_hash_free(&sign->digest));
- GUARD_AS_RESULT(s2n_free(&sign->signature));
+ RESULT_GUARD_POSIX(s2n_realloc(sigcopy, data_len));
+ RESULT_CHECKED_MEMCPY(sigcopy->data, data, data_len);
return S2N_RESULT_OK;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_async_pkey.h b/contrib/restricted/aws/s2n/tls/s2n_async_pkey.h
index 2ef8386d2f..631a186a9a 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_async_pkey.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_async_pkey.h
@@ -32,19 +32,17 @@ struct s2n_async_pkey_op;
#define S2N_ASYNC_PKEY_GUARD(conn) \
do { \
__typeof(conn) __tmp_conn = (conn); \
- GUARD_NONNULL(__tmp_conn); \
+ POSIX_GUARD_PTR(__tmp_conn); \
switch (conn->handshake.async_state) { \
case S2N_ASYNC_NOT_INVOKED: \
break; \
\
- case S2N_ASYNC_INVOKING_CALLBACK: \
- case S2N_ASYNC_INVOKED_WAITING: \
- BAIL_POSIX(S2N_ERR_ASYNC_BLOCKED); \
+ case S2N_ASYNC_INVOKED: \
+ POSIX_BAIL(S2N_ERR_ASYNC_BLOCKED); \
\
- case S2N_ASYNC_INVOKED_COMPLETE: \
+ case S2N_ASYNC_COMPLETE: \
/* clean up state and return a success from handler */ \
__tmp_conn->handshake.async_state = S2N_ASYNC_NOT_INVOKED; \
- GUARD(s2n_conn_clear_handshake_read_block(__tmp_conn)); \
return S2N_SUCCESS; \
} \
} while (0)
@@ -56,15 +54,23 @@ struct s2n_async_pkey_op;
* call, we use a macro which directly returns the result of the s2n_async* operation, forcing the compiler to error out on
* unreachable code and forcing the developer to use the on_complete function instead */
#define S2N_ASYNC_PKEY_DECRYPT(conn, encrypted, init_decrypted, on_complete) \
- return S2N_RESULT_TO_POSIX(s2n_async_pkey_decrypt(conn, encrypted, init_decrypted, on_complete));
+ return s2n_result_is_ok(s2n_async_pkey_decrypt(conn, encrypted, init_decrypted, on_complete)) ? S2N_SUCCESS : S2N_FAILURE;
#define S2N_ASYNC_PKEY_SIGN(conn, sig_alg, digest, on_complete) \
- return S2N_RESULT_TO_POSIX(s2n_async_pkey_sign(conn, sig_alg, digest, on_complete));
+ return s2n_result_is_ok(s2n_async_pkey_sign(conn, sig_alg, digest, on_complete)) ? S2N_SUCCESS : S2N_FAILURE;
int s2n_async_pkey_op_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key *key);
int s2n_async_pkey_op_apply(struct s2n_async_pkey_op *op, struct s2n_connection *conn);
int s2n_async_pkey_op_free(struct s2n_async_pkey_op *op);
+int s2n_async_pkey_op_get_op_type(struct s2n_async_pkey_op *op, s2n_async_pkey_op_type *type);
+int s2n_async_pkey_op_get_input_size(struct s2n_async_pkey_op *op, uint32_t *data_len);
+int s2n_async_pkey_op_get_input(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len);
+int s2n_async_pkey_op_set_output(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len);
+int s2n_async_pkey_op_set_validation_mode(struct s2n_async_pkey_op *op, s2n_async_pkey_validation_mode mode);
+
+S2N_RESULT s2n_async_pkey_verify_signature(struct s2n_connection *conn, s2n_signature_algorithm sig_alg,
+ struct s2n_hash_state *digest, struct s2n_blob *signature);
S2N_RESULT s2n_async_pkey_decrypt(struct s2n_connection *conn, struct s2n_blob *encrypted, struct s2n_blob *init_decrypted,
s2n_async_pkey_decrypt_complete on_complete);
S2N_RESULT s2n_async_pkey_sign(struct s2n_connection *conn, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest,
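 s2n_async_pkey_sign_complete on_complete);

For illustration only (not part of the patch): a minimal sketch of an application async private-key callback built on the offload API added above (s2n_async_pkey_op_get_op_type / get_input_size / get_input / set_output / apply / free). The names my_external_sign_or_decrypt and app_async_pkey_callback and the 512-byte output buffer are hypothetical; error handling is reduced to early returns. A real callback would usually stash the op and complete it from another thread before retrying s2n_negotiate(), but completing it inline, as here, is also allowed by the new S2N_ASYNC_COMPLETE path.

#include <stdint.h>
#include <stdlib.h>

#include <s2n.h>

/* Hypothetical external key holder: signs the digest or decrypts the secret. */
extern int my_external_sign_or_decrypt(s2n_async_pkey_op_type type,
                                       const uint8_t *in, uint32_t in_len,
                                       uint8_t *out, uint32_t *out_len);

int app_async_pkey_callback(struct s2n_connection *conn, struct s2n_async_pkey_op *op)
{
    s2n_async_pkey_op_type type = S2N_ASYNC_SIGN;
    if (s2n_async_pkey_op_get_op_type(op, &type) != S2N_SUCCESS) { return S2N_FAILURE; }

    /* Copy the raw input: the digest for a sign op, the encrypted secret for a decrypt op. */
    uint32_t in_len = 0;
    if (s2n_async_pkey_op_get_input_size(op, &in_len) != S2N_SUCCESS) { return S2N_FAILURE; }
    uint8_t *input = malloc(in_len);
    if (input == NULL) { return S2N_FAILURE; }
    if (s2n_async_pkey_op_get_input(op, input, in_len) != S2N_SUCCESS) { free(input); return S2N_FAILURE; }

    /* Perform the private key operation outside of s2n. */
    uint8_t output[512] = { 0 };
    uint32_t out_len = sizeof(output);
    int rc = my_external_sign_or_decrypt(type, input, in_len, output, &out_len);
    free(input);
    if (rc != 0) { return S2N_FAILURE; }

    /* Hand the result back, mark the op complete, and resume the connection. */
    if (s2n_async_pkey_op_set_output(op, output, out_len) != S2N_SUCCESS) { return S2N_FAILURE; }
    if (s2n_async_pkey_op_apply(op, conn) != S2N_SUCCESS) { return S2N_FAILURE; }
    return s2n_async_pkey_op_free(op);
}

Such a callback would typically be registered with s2n_config_set_async_pkey_callback(); an application that wants s2n to cross-check the returned signature against the certificate can additionally opt in via the strict validation mode (S2N_ASYNC_PKEY_VALIDATION_STRICT) introduced in this change.
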
diff --git a/contrib/restricted/aws/s2n/tls/s2n_auth_selection.c b/contrib/restricted/aws/s2n/tls/s2n_auth_selection.c
index 33404c811b..eb132cc642 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_auth_selection.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_auth_selection.c
@@ -40,7 +40,7 @@
* TLS1.2 cipher suites.
*/
-static int s2n_get_auth_method_for_cert_type(s2n_pkey_type cert_type, s2n_authentication_method *auth_method)
+int s2n_get_auth_method_for_cert_type(s2n_pkey_type cert_type, s2n_authentication_method *auth_method)
{
switch(cert_type) {
case S2N_PKEY_TYPE_RSA:
@@ -52,9 +52,9 @@ static int s2n_get_auth_method_for_cert_type(s2n_pkey_type cert_type, s2n_authen
return S2N_SUCCESS;
case S2N_PKEY_TYPE_UNKNOWN:
case S2N_PKEY_TYPE_SENTINEL:
- S2N_ERROR(S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ POSIX_BAIL(S2N_ERR_CERT_TYPE_UNSUPPORTED);
}
- S2N_ERROR(S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ POSIX_BAIL(S2N_ERR_CERT_TYPE_UNSUPPORTED);
}
static int s2n_get_cert_type_for_sig_alg(s2n_signature_algorithm sig_alg, s2n_pkey_type *cert_type)
@@ -71,17 +71,17 @@ static int s2n_get_cert_type_for_sig_alg(s2n_signature_algorithm sig_alg, s2n_pk
*cert_type = S2N_PKEY_TYPE_RSA_PSS;
return S2N_SUCCESS;
case S2N_SIGNATURE_ANONYMOUS:
- S2N_ERROR(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
}
- S2N_ERROR(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHM);
}
static int s2n_is_sig_alg_valid_for_cipher_suite(s2n_signature_algorithm sig_alg, struct s2n_cipher_suite *cipher_suite)
{
- notnull_check(cipher_suite);
+ POSIX_ENSURE_REF(cipher_suite);
s2n_pkey_type cert_type_for_sig_alg;
- GUARD(s2n_get_cert_type_for_sig_alg(sig_alg, &cert_type_for_sig_alg));
+ POSIX_GUARD(s2n_get_cert_type_for_sig_alg(sig_alg, &cert_type_for_sig_alg));
/* Non-ephemeral key exchange methods require encryption, and RSA-PSS certificates
* do not support encryption.
@@ -90,7 +90,7 @@ static int s2n_is_sig_alg_valid_for_cipher_suite(s2n_signature_algorithm sig_alg
* algorithm that requires RSA-PSS certificates is not valid.
*/
if (cipher_suite->key_exchange_alg != NULL && !cipher_suite->key_exchange_alg->is_ephemeral) {
- ne_check(cert_type_for_sig_alg, S2N_PKEY_TYPE_RSA_PSS);
+ POSIX_ENSURE_NE(cert_type_for_sig_alg, S2N_PKEY_TYPE_RSA_PSS);
}
/* If a cipher suite includes an auth method, then the signature algorithm
@@ -98,8 +98,8 @@ static int s2n_is_sig_alg_valid_for_cipher_suite(s2n_signature_algorithm sig_alg
*/
if (cipher_suite->auth_method != S2N_AUTHENTICATION_METHOD_SENTINEL) {
s2n_authentication_method auth_method_for_sig_alg;
- GUARD(s2n_get_auth_method_for_cert_type(cert_type_for_sig_alg, &auth_method_for_sig_alg));
- eq_check(cipher_suite->auth_method, auth_method_for_sig_alg);
+ POSIX_GUARD(s2n_get_auth_method_for_cert_type(cert_type_for_sig_alg, &auth_method_for_sig_alg));
+ POSIX_ENSURE_EQ(cipher_suite->auth_method, auth_method_for_sig_alg);
}
return S2N_SUCCESS;
@@ -107,22 +107,22 @@ static int s2n_is_sig_alg_valid_for_cipher_suite(s2n_signature_algorithm sig_alg
static int s2n_certs_exist_for_sig_scheme(struct s2n_connection *conn, const struct s2n_signature_scheme *sig_scheme)
{
- notnull_check(sig_scheme);
+ POSIX_ENSURE_REF(sig_scheme);
s2n_pkey_type cert_type;
- GUARD(s2n_get_cert_type_for_sig_alg(sig_scheme->sig_alg, &cert_type));
+ POSIX_GUARD(s2n_get_cert_type_for_sig_alg(sig_scheme->sig_alg, &cert_type));
/* A valid cert must exist for the authentication method. */
struct s2n_cert_chain_and_key *cert = s2n_get_compatible_cert_chain_and_key(conn, cert_type);
- notnull_check(cert);
+ POSIX_ENSURE_REF(cert);
/* For sig_algs that include a curve, the group must also match. */
if (sig_scheme->signature_curve != NULL) {
- notnull_check(cert->private_key);
- notnull_check(cert->cert_chain);
- notnull_check(cert->cert_chain->head);
- eq_check(cert->cert_chain->head->pkey_type, S2N_PKEY_TYPE_ECDSA);
- GUARD(s2n_ecdsa_pkey_matches_curve(&cert->private_key->key.ecdsa_key, sig_scheme->signature_curve));
+ POSIX_ENSURE_REF(cert->private_key);
+ POSIX_ENSURE_REF(cert->cert_chain);
+ POSIX_ENSURE_REF(cert->cert_chain->head);
+ POSIX_ENSURE_EQ(cert->cert_chain->head->pkey_type, S2N_PKEY_TYPE_ECDSA);
+ POSIX_ENSURE_EQ(cert->cert_chain->head->ec_curve_nid, sig_scheme->signature_curve->libcrypto_nid);
}
return S2N_SUCCESS;
@@ -136,7 +136,7 @@ static int s2n_certs_exist_for_auth_method(struct s2n_connection *conn, s2n_auth
s2n_authentication_method auth_method_for_cert_type;
for (int i = 0; i < S2N_CERT_TYPE_COUNT; i++) {
- GUARD(s2n_get_auth_method_for_cert_type(i, &auth_method_for_cert_type));
+ POSIX_GUARD(s2n_get_auth_method_for_cert_type(i, &auth_method_for_cert_type));
if (auth_method != auth_method_for_cert_type) {
continue;
@@ -146,7 +146,7 @@ static int s2n_certs_exist_for_auth_method(struct s2n_connection *conn, s2n_auth
return S2N_SUCCESS;
}
}
- S2N_ERROR(S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ POSIX_BAIL(S2N_ERR_CERT_TYPE_UNSUPPORTED);
}
/* TLS1.3 ciphers are always valid, as they don't include an auth method.
@@ -158,9 +158,9 @@ static int s2n_certs_exist_for_auth_method(struct s2n_connection *conn, s2n_auth
*/
int s2n_is_cipher_suite_valid_for_auth(struct s2n_connection *conn, struct s2n_cipher_suite *cipher_suite)
{
- notnull_check(cipher_suite);
+ POSIX_ENSURE_REF(cipher_suite);
- GUARD(s2n_certs_exist_for_auth_method(conn, cipher_suite->auth_method));
+ POSIX_GUARD(s2n_certs_exist_for_auth_method(conn, cipher_suite->auth_method));
return S2N_SUCCESS;
}
@@ -174,17 +174,17 @@ int s2n_is_cipher_suite_valid_for_auth(struct s2n_connection *conn, struct s2n_c
*/
int s2n_is_sig_scheme_valid_for_auth(struct s2n_connection *conn, const struct s2n_signature_scheme *sig_scheme)
{
- notnull_check(conn);
- notnull_check(sig_scheme);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(sig_scheme);
struct s2n_cipher_suite *cipher_suite = conn->secure.cipher_suite;
- notnull_check(cipher_suite);
+ POSIX_ENSURE_REF(cipher_suite);
- GUARD(s2n_certs_exist_for_sig_scheme(conn, sig_scheme));
+ POSIX_GUARD(s2n_certs_exist_for_sig_scheme(conn, sig_scheme));
/* For the client side, signature algorithm does not need to match the cipher suite. */
if (conn->mode == S2N_SERVER) {
- GUARD(s2n_is_sig_alg_valid_for_cipher_suite(sig_scheme->sig_alg, cipher_suite));
+ POSIX_GUARD(s2n_is_sig_alg_valid_for_cipher_suite(sig_scheme->sig_alg, cipher_suite));
}
return S2N_SUCCESS;
}
@@ -200,11 +200,11 @@ int s2n_is_sig_scheme_valid_for_auth(struct s2n_connection *conn, const struct s
*/
int s2n_is_cert_type_valid_for_auth(struct s2n_connection *conn, s2n_pkey_type cert_type)
{
- notnull_check(conn);
- notnull_check(conn->secure.cipher_suite);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
s2n_authentication_method auth_method;
- GUARD(s2n_get_auth_method_for_cert_type(cert_type, &auth_method));
+ POSIX_GUARD(s2n_get_auth_method_for_cert_type(cert_type, &auth_method));
if (conn->secure.cipher_suite->auth_method != S2N_AUTHENTICATION_METHOD_SENTINEL) {
S2N_ERROR_IF(auth_method != conn->secure.cipher_suite->auth_method, S2N_ERR_CERT_TYPE_UNSUPPORTED);
@@ -219,10 +219,10 @@ int s2n_is_cert_type_valid_for_auth(struct s2n_connection *conn, s2n_pkey_type c
*/
int s2n_select_certs_for_server_auth(struct s2n_connection *conn, struct s2n_cert_chain_and_key **chosen_certs)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
s2n_pkey_type cert_type;
- GUARD(s2n_get_cert_type_for_sig_alg(conn->secure.conn_sig_scheme.sig_alg, &cert_type));
+ POSIX_GUARD(s2n_get_cert_type_for_sig_alg(conn->handshake_params.conn_sig_scheme.sig_alg, &cert_type));
*chosen_certs = s2n_get_compatible_cert_chain_and_key(conn, cert_type);
S2N_ERROR_IF(*chosen_certs == NULL, S2N_ERR_CERT_TYPE_UNSUPPORTED);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_auth_selection.h b/contrib/restricted/aws/s2n/tls/s2n_auth_selection.h
index 370f00c4f8..b52736d7f6 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_auth_selection.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_auth_selection.h
@@ -20,6 +20,7 @@
#include "crypto/s2n_certificate.h"
#include "crypto/s2n_signature.h"
+int s2n_get_auth_method_for_cert_type(s2n_pkey_type cert_type, s2n_authentication_method *auth_method);
int s2n_is_cipher_suite_valid_for_auth(struct s2n_connection *conn, struct s2n_cipher_suite *cipher_suite);
int s2n_is_sig_scheme_valid_for_auth(struct s2n_connection *conn, const struct s2n_signature_scheme *sig_scheme);
int s2n_is_cert_type_valid_for_auth(struct s2n_connection *conn, s2n_pkey_type cert_type);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_cbc.c b/contrib/restricted/aws/s2n/tls/s2n_cbc.c
index b2ae713e8e..401ab76072 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_cbc.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_cbc.c
@@ -23,6 +23,7 @@
#include "crypto/s2n_hmac.h"
+#include "tls/s2n_connection.h"
#include "tls/s2n_record.h"
/* A TLS CBC record looks like ..
@@ -47,18 +48,12 @@
*/
int s2n_verify_cbc(struct s2n_connection *conn, struct s2n_hmac_state *hmac, struct s2n_blob *decrypted)
{
- /* Set up MAC copy workspace */
- struct s2n_hmac_state *copy = &conn->client->record_mac_copy_workspace;
- if (conn->mode == S2N_CLIENT) {
- copy = &conn->server->record_mac_copy_workspace;
- }
-
uint8_t mac_digest_size;
- GUARD(s2n_hmac_digest_size(hmac->alg, &mac_digest_size));
+ POSIX_GUARD(s2n_hmac_digest_size(hmac->alg, &mac_digest_size));
/* The record has to be at least big enough to contain the MAC,
* plus the padding length byte */
- gt_check(decrypted->size, mac_digest_size);
+ POSIX_ENSURE_GT(decrypted->size, mac_digest_size);
int payload_and_padding_size = decrypted->size - mac_digest_size;
@@ -68,18 +63,22 @@ int s2n_verify_cbc(struct s2n_connection *conn, struct s2n_hmac_state *hmac, str
int payload_length = MAX(payload_and_padding_size - padding_length - 1, 0);
/* Update the MAC */
- GUARD(s2n_hmac_update(hmac, decrypted->data, payload_length));
- GUARD(s2n_hmac_copy(copy, hmac));
+ POSIX_GUARD(s2n_hmac_update(hmac, decrypted->data, payload_length));
+ int currently_in_hash_block = hmac->currently_in_hash_block;
/* Check the MAC */
uint8_t check_digest[S2N_MAX_DIGEST_LEN];
- lte_check(mac_digest_size, sizeof(check_digest));
- GUARD(s2n_hmac_digest_two_compression_rounds(hmac, check_digest, mac_digest_size));
+ POSIX_ENSURE_LTE(mac_digest_size, sizeof(check_digest));
+ POSIX_GUARD(s2n_hmac_digest_two_compression_rounds(hmac, check_digest, mac_digest_size));
int mismatches = s2n_constant_time_equals(decrypted->data + payload_length, check_digest, mac_digest_size) ^ 1;
- /* Compute a MAC on the rest of the data so that we perform the same number of hash operations */
- GUARD(s2n_hmac_update(copy, decrypted->data + payload_length + mac_digest_size, decrypted->size - payload_length - mac_digest_size - 1));
+ /* Compute a MAC on the rest of the data so that we perform the same number of hash operations.
+ * Include the partial hash block from the first MAC to ensure we use the same number of blocks.
+ */
+ POSIX_GUARD(s2n_hmac_reset(hmac));
+ POSIX_GUARD(s2n_hmac_update(hmac, decrypted->data, currently_in_hash_block));
+ POSIX_GUARD(s2n_hmac_update(hmac, decrypted->data + payload_length + mac_digest_size, decrypted->size - payload_length - mac_digest_size - 1));
/* SSLv3 doesn't specify what the padding should actually be */
if (conn->actual_protocol_version == S2N_SSLv3) {
@@ -90,13 +89,11 @@ int s2n_verify_cbc(struct s2n_connection *conn, struct s2n_hmac_state *hmac, str
int check = MIN(255, (payload_and_padding_size - 1));
int cutoff = check - padding_length;
- for (int i = 0, j = decrypted->size - 1 - check; i < check && j < decrypted->size; i++, j++) {
+ for (uint32_t i = 0, j = decrypted->size - 1 - check; i < check && j < decrypted->size; i++, j++) {
uint8_t mask = ~(0xff << ((i >= cutoff) * 8));
mismatches |= (decrypted->data[j] ^ padding_length) & mask;
}
- GUARD(s2n_hmac_reset(copy));
-
S2N_ERROR_IF(mismatches, S2N_ERR_CBC_VERIFY);
return 0;
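
For illustration only (not part of the patch): the mask in the padding-check loop above avoids branching on secret data. The standalone sketch below shows how ~(0xff << ((i >= cutoff) * 8)) collapses to 0x00 for bytes before the padding region (so they are ignored) and to 0xff for padding bytes (which must then equal padding_length); the cutoff value of 3 is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int cutoff = 3; /* hypothetical: the first 3 scanned bytes are not padding */

    for (int i = 0; i < 6; i++) {
        /* (i >= cutoff) is 0 or 1, so the shift is 0 or 8 bits:
         *   shift 0: ~0x00ff -> low byte 0x00 (byte ignored)
         *   shift 8: ~0xff00 -> low byte 0xff (byte checked against padding_length) */
        uint8_t mask = ~(0xff << ((i >= cutoff) * 8));
        printf("i=%d mask=0x%02x\n", i, mask);
    }

    return 0;
}
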
diff --git a/contrib/restricted/aws/s2n/tls/s2n_change_cipher_spec.c b/contrib/restricted/aws/s2n/tls/s2n_change_cipher_spec.c
index 19caaa96a1..99dfdfb22a 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_change_cipher_spec.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_change_cipher_spec.c
@@ -32,7 +32,7 @@ int s2n_basic_ccs_recv(struct s2n_connection *conn)
{
uint8_t type;
- GUARD(s2n_stuffer_read_uint8(&conn->handshake.io, &type));
+ POSIX_GUARD(s2n_stuffer_read_uint8(&conn->handshake.io, &type));
S2N_ERROR_IF(type != CHANGE_CIPHER_SPEC_TYPE, S2N_ERR_BAD_MESSAGE);
return 0;
@@ -40,14 +40,14 @@ int s2n_basic_ccs_recv(struct s2n_connection *conn)
int s2n_client_ccs_recv(struct s2n_connection *conn)
{
- GUARD(s2n_basic_ccs_recv(conn));
+ POSIX_GUARD(s2n_basic_ccs_recv(conn));
/* Zero the sequence number */
struct s2n_blob seq = {.data = conn->secure.client_sequence_number,.size = sizeof(conn->secure.client_sequence_number) };
- GUARD(s2n_blob_zero(&seq));
+ POSIX_GUARD(s2n_blob_zero(&seq));
/* Compute the finished message */
- GUARD(s2n_prf_client_finished(conn));
+ POSIX_GUARD(s2n_prf_client_finished(conn));
/* Update the client to use the cipher-suite */
conn->client = &conn->secure;
@@ -55,21 +55,21 @@ int s2n_client_ccs_recv(struct s2n_connection *conn)
/* Flush any partial alert messages that were pending.
* If we don't do this, an attacker can inject a 1-byte alert message into the handshake
* and cause later, valid alerts to be processed incorrectly. */
- GUARD(s2n_stuffer_wipe(&conn->alert_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->alert_in));
return 0;
}
int s2n_server_ccs_recv(struct s2n_connection *conn)
{
- GUARD(s2n_basic_ccs_recv(conn));
+ POSIX_GUARD(s2n_basic_ccs_recv(conn));
/* Zero the sequence number */
struct s2n_blob seq = {.data = conn->secure.server_sequence_number,.size = sizeof(conn->secure.server_sequence_number) };
- GUARD(s2n_blob_zero(&seq));
+ POSIX_GUARD(s2n_blob_zero(&seq));
/* Compute the finished message */
- GUARD(s2n_prf_server_finished(conn));
+ POSIX_GUARD(s2n_prf_server_finished(conn));
/* Update the secure state to active, and point the client at the active state */
conn->server = &conn->secure;
@@ -77,14 +77,14 @@ int s2n_server_ccs_recv(struct s2n_connection *conn)
/* Flush any partial alert messages that were pending.
* If we don't do this, an attacker can inject a 1-byte alert message into the handshake
* and cause later, valid alerts to be processed incorrectly. */
- GUARD(s2n_stuffer_wipe(&conn->alert_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->alert_in));
return 0;
}
int s2n_ccs_send(struct s2n_connection *conn)
{
- GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, CHANGE_CIPHER_SPEC_TYPE));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, CHANGE_CIPHER_SPEC_TYPE));
return 0;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.c b/contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.c
index 67893009b1..10407f487a 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.c
@@ -14,7 +14,7 @@
*/
#include "tls/s2n_cipher_preferences.h"
-#include <s2n.h>
+#include "api/s2n.h"
#include <stdint.h>
#include <strings.h>
#include "tls/s2n_config.h"
@@ -57,6 +57,31 @@ const struct s2n_cipher_preferences cipher_preferences_20190801 = {
.suites = cipher_suites_20190801,
};
+/* Same as 20190801, but with ECDSA for TLS 1.2 added */
+struct s2n_cipher_suite *cipher_suites_20210831[] = {
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_chacha20_poly1305_sha256,
+ &s2n_ecdhe_rsa_with_chacha20_poly1305_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20210831 = {
+ .count = s2n_array_len(cipher_suites_20210831),
+ .suites = cipher_suites_20210831,
+};
+
/* s2n's list of cipher suites, in order of preference, as of 2014-06-01 */
struct s2n_cipher_suite *cipher_suites_20140601[] = {
&s2n_dhe_rsa_with_aes_128_cbc_sha256,
@@ -311,6 +336,110 @@ const struct s2n_cipher_preferences cipher_preferences_20190214 = {
.suites = cipher_suites_20190214,
};
+/* 20190214 with aes-gcm prioritized above aes-cbc */
+struct s2n_cipher_suite *cipher_suites_20190214_gcm[] = {
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_dhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20190214_gcm = {
+ .count = s2n_array_len(cipher_suites_20190214_gcm),
+ .suites = cipher_suites_20190214_gcm,
+};
+
+/* Same as cipher_suites_20190214, but with TLS 1.3 Ciphers */
+struct s2n_cipher_suite *cipher_suites_20210825[] = {
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_dhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20210825 = {
+ .count = s2n_array_len(cipher_suites_20210825),
+ .suites = cipher_suites_20210825,
+};
+
+/* Same as cipher_suites_20190214_gcm, but with TLS 1.3 Ciphers */
+struct s2n_cipher_suite *cipher_suites_20210825_gcm[] = {
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_dhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20210825_gcm = {
+ .count = s2n_array_len(cipher_suites_20210825_gcm),
+ .suites = cipher_suites_20210825_gcm,
+};
+
struct s2n_cipher_suite *cipher_suites_null[] = {
&s2n_null_cipher_suite
};
@@ -348,6 +477,34 @@ const struct s2n_cipher_preferences cipher_preferences_20170328 = {
.suites = cipher_suites_20170328,
};
+/* Equivalent to cipher_suites_20170328 with aes-gcm prioritized above aes-cbc */
+struct s2n_cipher_suite *cipher_suites_20170328_gcm[] = {
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_dhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20170328_gcm = {
+ .count = s2n_array_len(cipher_suites_20170328_gcm),
+ .suites = cipher_suites_20170328_gcm,
+};
+
/* Preferences optimized for FIPS compatibility. */
struct s2n_cipher_suite *cipher_suites_20170405[] = {
&s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
@@ -368,6 +525,26 @@ const struct s2n_cipher_preferences cipher_preferences_20170405 = {
.suites = cipher_suites_20170405,
};
+/* Preferences optimized for FIPS compatibility with GCM prioritized */
+struct s2n_cipher_suite *cipher_suites_20170405_gcm[] = {
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20170405_gcm = {
+ .count = s2n_array_len(cipher_suites_20170405_gcm),
+ .suites = cipher_suites_20170405_gcm,
+};
+
/* Equivalent to cipher_suite_20160411 with 3DES removed.
* Make a CBC cipher #1 to avoid negotiating GCM with buggy Java clients. */
struct s2n_cipher_suite *cipher_suites_20170718[] = {
@@ -390,6 +567,27 @@ const struct s2n_cipher_preferences cipher_preferences_20170718 = {
.suites = cipher_suites_20170718,
};
+/* Equivalent to cipher_suites_20170718 with aes-gcm prioritized above aes-cbc */
+struct s2n_cipher_suite *cipher_suites_20170718_gcm[] = {
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20170718_gcm = {
+ .count = s2n_array_len(cipher_suites_20170718_gcm),
+ .suites = cipher_suites_20170718_gcm,
+};
+
struct s2n_cipher_suite *cipher_suites_elb_security_policy_2015_04[] = {
&s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
&s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
@@ -1004,6 +1202,79 @@ const struct s2n_cipher_preferences cipher_preferences_cloudfront_tls_1_2_2021 =
.suites = cipher_suites_cloudfront_tls_1_2_2021,
};
+/* Based on cipher_preferences_cloudfront_tls_1_0_2016, but with ordering changed and AES256-SHA256, DES-CBC3-SHA, and
+ * RC4-MD5 added for compatibility. */
+struct s2n_cipher_suite *cipher_suites_aws_crt_sdk_ssl_v3[] = {
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_chacha20_poly1305_sha256,
+ &s2n_ecdhe_rsa_with_chacha20_poly1305_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_rsa_with_rc4_128_md5
+};
+
+const struct s2n_cipher_preferences cipher_preferences_aws_crt_sdk_ssl_v3 = {
+ .count = s2n_array_len(cipher_suites_aws_crt_sdk_ssl_v3),
+ .suites = cipher_suites_aws_crt_sdk_ssl_v3,
+};
+
+/* Based on cipher_preferences_cloudfront_tls_1_0_2016, but with ordering changed and AES256-SHA256 added for
+ * compatibility. */
+struct s2n_cipher_suite *cipher_suites_aws_crt_sdk_default[] = {
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_chacha20_poly1305_sha256,
+ &s2n_ecdhe_rsa_with_chacha20_poly1305_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_aws_crt_sdk_default = {
+ .count = s2n_array_len(cipher_suites_aws_crt_sdk_default),
+ .suites = cipher_suites_aws_crt_sdk_default,
+};
+
+struct s2n_cipher_suite *cipher_suites_aws_crt_sdk_tls_13[] = {
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716
+};
+
+const struct s2n_cipher_preferences cipher_preferences_aws_crt_sdk_tls_13 = {
+ .count = s2n_array_len(cipher_suites_aws_crt_sdk_tls_13),
+ .suites = cipher_suites_aws_crt_sdk_tls_13,
+};
+
struct s2n_cipher_suite *cipher_suites_kms_tls_1_0_2018_10[] = {
&s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
&s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
@@ -1022,6 +1293,26 @@ const struct s2n_cipher_preferences cipher_preferences_kms_tls_1_0_2018_10 = {
.suites = cipher_suites_kms_tls_1_0_2018_10,
};
+
+struct s2n_cipher_suite *cipher_suites_kms_tls_1_0_2021_08[] = {
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_kms_tls_1_0_2021_08 = {
+ .count = s2n_array_len(cipher_suites_kms_tls_1_0_2021_08),
+ .suites = cipher_suites_kms_tls_1_0_2021_08,
+};
+
struct s2n_cipher_suite *cipher_suites_kms_pq_tls_1_0_2019_06[] = {
&s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
&s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
@@ -1077,7 +1368,7 @@ const struct s2n_cipher_preferences cipher_preferences_pq_sike_test_tls_1_0_2020
.suites = cipher_suites_pq_sike_test_tls_1_0_2019_11,
};
-/* Includes Both Round 2 and Round 1 PQ Ciphers */
+/* Includes Kyber, BIKE, and SIKE PQ Ciphers */
struct s2n_cipher_suite *cipher_suites_kms_pq_tls_1_0_2020_07[] = {
&s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
&s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
@@ -1121,6 +1412,306 @@ const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2020_12 = {
.suites = cipher_suites_pq_tls_1_0_2020_12,
};
+/* Same as ELBSecurityPolicy-TLS-1-1-2017-01, but with PQ Ciphers added to the top of the preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_1_2021_05_17[] = {
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_1_2021_05_17 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_1_2021_05_17),
+ .suites = cipher_suites_pq_tls_1_1_2021_05_17,
+};
+
+/* Same as cipher_preferences_20190214, but with PQ Ciphers appended to top of preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_0_2021_05_18[] = {
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_dhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_18 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_0_2021_05_18),
+ .suites = cipher_suites_pq_tls_1_0_2021_05_18,
+};
+
+/* Same as ELBSecurityPolicy-2016-08, but with PQ Ciphers appended to top of preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_0_2021_05_19[] = {
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_19 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_0_2021_05_19),
+ .suites = cipher_suites_pq_tls_1_0_2021_05_19,
+};
+
+/* Same as ELBSecurityPolicy-TLS-1-1-2017-01, but with TLS 1.3 and PQ Ciphers appended to top of preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_1_2021_05_21[] = {
+ /* TLS 1.3 Ciphers don't specify their Key exchange method, allowing for Hybrid PQ KEMs to be negotiated separately */
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_1_2021_05_21 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_1_2021_05_21),
+ .suites = cipher_suites_pq_tls_1_1_2021_05_21,
+};
+
+/* Same as cipher_preferences_20190214, but with TLS 1.3 and PQ Ciphers appended to top of preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_0_2021_05_22[] = {
+ /* TLS 1.3 Ciphers don't specify their Key exchange method, allowing for Hybrid PQ KEMs to be negotiated separately */
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_dhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_22 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_0_2021_05_22),
+ .suites = cipher_suites_pq_tls_1_0_2021_05_22,
+};
+
+/* Same as ELBSecurityPolicy-2016-08, but with TLS 1.3 and PQ Ciphers appended to top of preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_0_2021_05_23[] = {
+ /* TLS 1.3 Ciphers don't specify their Key exchange method, allowing for Hybrid PQ KEMs to be negotiated separately */
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_23 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_0_2021_05_23),
+ .suites = cipher_suites_pq_tls_1_0_2021_05_23,
+};
+
+/* Same as cipher_preferences_kms_pq_tls_1_0_2020_07, but with TLS 1.3 appended to top of preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_0_2021_05_24[] = {
+ /* TLS 1.3 Ciphers don't specify their Key exchange method, allowing for Hybrid PQ KEMs to be negotiated separately */
+ S2N_TLS13_CIPHER_SUITES_20190801,
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_24 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_0_2021_05_24),
+ .suites = cipher_suites_pq_tls_1_0_2021_05_24,
+};
+
+/* Same as 20190214_gcm, but with PQ Ciphers appended to top of preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_0_2021_05_25[] = {
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_dhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_25 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_0_2021_05_25),
+ .suites = cipher_suites_pq_tls_1_0_2021_05_25,
+};
+
+/* Same as 20190214_gcm, but with TLS 1.3 and PQ Ciphers appended to top of preference list */
+struct s2n_cipher_suite *cipher_suites_pq_tls_1_0_2021_05_26[] = {
+ /* TLS 1.3 Ciphers don't specify their Key exchange method, allowing for Hybrid PQ KEMs to be negotiated separately */
+ S2N_TLS13_CLOUDFRONT_CIPHER_SUITES_20200716,
+ &s2n_ecdhe_kyber_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_bike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_sike_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_rsa_with_aes_128_gcm_sha256,
+ &s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha,
+ &s2n_rsa_with_aes_128_cbc_sha256,
+ &s2n_rsa_with_aes_256_cbc_sha,
+ &s2n_rsa_with_aes_256_cbc_sha256,
+ &s2n_rsa_with_3des_ede_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_dhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_26 = {
+ .count = s2n_array_len(cipher_suites_pq_tls_1_0_2021_05_26),
+ .suites = cipher_suites_pq_tls_1_0_2021_05_26,
+};
+
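(Illustrative note, not part of the diff.) The pq_tls_1_* policies above all follow one pattern: an existing preference list with the hybrid PQ suites (and, for the 2021_05_2x variants, the TLS 1.3 suites) stacked on top, so classical clients keep negotiating as before. A minimal sketch of applying such a policy to a single connection rather than a whole config, assuming the per-connection selection API; the policy-name string is only a guess at what the security-policy table registers for cipher_preferences_pq_tls_1_0_2021_05_26.

    /* hypothetical name for the policy backed by cipher_preferences_pq_tls_1_0_2021_05_26 */
    if (s2n_connection_set_cipher_preferences(conn, "PQ-TLS-1-0-2021-05-26") != S2N_SUCCESS) {
        /* fall back to the config-wide policy */
    }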
struct s2n_cipher_suite *cipher_suites_kms_fips_tls_1_2_2018_10[] = {
&s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
&s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
@@ -1135,4 +1726,44 @@ const struct s2n_cipher_preferences cipher_preferences_kms_fips_tls_1_2_2018_10
.suites = cipher_suites_kms_fips_tls_1_2_2018_10,
};
+struct s2n_cipher_suite *cipher_suites_kms_fips_tls_1_2_2021_08[] = {
+ &s2n_tls13_aes_128_gcm_sha256,
+ &s2n_tls13_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_128_gcm_sha256,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_128_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_256_cbc_sha256,
+ &s2n_dhe_rsa_with_aes_128_cbc_sha256,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_kms_fips_tls_1_2_2021_08 = {
+ .count = s2n_array_len(cipher_suites_kms_fips_tls_1_2_2021_08),
+ .suites = cipher_suites_kms_fips_tls_1_2_2021_08,
+};
+
+struct s2n_cipher_suite *cipher_suites_20210816[] = {
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20210816 = {
+ .count = s2n_array_len(cipher_suites_20210816),
+ .suites = cipher_suites_20210816,
+};
+
+struct s2n_cipher_suite *cipher_suites_20210816_gcm[] = {
+ &s2n_ecdhe_ecdsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_gcm_sha384,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+};
+
+const struct s2n_cipher_preferences cipher_preferences_20210816_gcm = {
+ .count = s2n_array_len(cipher_suites_20210816_gcm),
+ .suites = cipher_suites_20210816_gcm,
+};
+
/* clang-format on */
diff --git a/contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.h b/contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.h
index 8dabb707ff..7156c68185 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_cipher_preferences.h
@@ -35,13 +35,23 @@ extern const struct s2n_cipher_preferences cipher_preferences_20160804;
extern const struct s2n_cipher_preferences cipher_preferences_20160824;
extern const struct s2n_cipher_preferences cipher_preferences_20170210;
extern const struct s2n_cipher_preferences cipher_preferences_20170328;
+extern const struct s2n_cipher_preferences cipher_preferences_20170328_gcm;
extern const struct s2n_cipher_preferences cipher_preferences_20170405;
+extern const struct s2n_cipher_preferences cipher_preferences_20170405_gcm;
extern const struct s2n_cipher_preferences cipher_preferences_20170718;
+extern const struct s2n_cipher_preferences cipher_preferences_20170718_gcm;
extern const struct s2n_cipher_preferences cipher_preferences_20190214;
+extern const struct s2n_cipher_preferences cipher_preferences_20190214_gcm;
extern const struct s2n_cipher_preferences cipher_preferences_20190801;
extern const struct s2n_cipher_preferences cipher_preferences_20190120;
extern const struct s2n_cipher_preferences cipher_preferences_20190121;
extern const struct s2n_cipher_preferences cipher_preferences_20190122;
+extern const struct s2n_cipher_preferences cipher_preferences_20210816;
+extern const struct s2n_cipher_preferences cipher_preferences_20210816_gcm;
+extern const struct s2n_cipher_preferences cipher_preferences_20210825;
+extern const struct s2n_cipher_preferences cipher_preferences_20210825_gcm;
+extern const struct s2n_cipher_preferences cipher_preferences_20210831;
+
extern const struct s2n_cipher_preferences cipher_preferences_test_all;
extern const struct s2n_cipher_preferences cipher_preferences_test_all_tls12;
@@ -86,15 +96,31 @@ extern const struct s2n_cipher_preferences cipher_preferences_cloudfront_tls_1_1
extern const struct s2n_cipher_preferences cipher_preferences_cloudfront_tls_1_2_2018_legacy;
extern const struct s2n_cipher_preferences cipher_preferences_cloudfront_tls_1_2_2019_legacy;
-extern const struct s2n_cipher_preferences cipher_preferences_kms_tls_1_0_2018_10;
+/* AWS Common Runtime Cipher Preferences */
+extern const struct s2n_cipher_preferences cipher_preferences_aws_crt_sdk_ssl_v3;
+extern const struct s2n_cipher_preferences cipher_preferences_aws_crt_sdk_default;
+extern const struct s2n_cipher_preferences cipher_preferences_aws_crt_sdk_tls_13;
+/* AWS KMS Cipher Preferences */
+extern const struct s2n_cipher_preferences cipher_preferences_kms_tls_1_0_2018_10;
+extern const struct s2n_cipher_preferences cipher_preferences_kms_tls_1_0_2021_08;
+extern const struct s2n_cipher_preferences cipher_preferences_kms_fips_tls_1_2_2018_10;
+extern const struct s2n_cipher_preferences cipher_preferences_kms_fips_tls_1_2_2021_08;
extern const struct s2n_cipher_preferences cipher_preferences_kms_pq_tls_1_0_2019_06;
extern const struct s2n_cipher_preferences cipher_preferences_kms_pq_tls_1_0_2020_02;
extern const struct s2n_cipher_preferences cipher_preferences_kms_pq_tls_1_0_2020_07;
extern const struct s2n_cipher_preferences cipher_preferences_pq_sike_test_tls_1_0_2019_11;
extern const struct s2n_cipher_preferences cipher_preferences_pq_sike_test_tls_1_0_2020_02;
extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2020_12;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_1_2021_05_17;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_18;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_19;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_1_2021_05_21;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_22;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_23;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_24;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_25;
+extern const struct s2n_cipher_preferences cipher_preferences_pq_tls_1_0_2021_05_26;
-extern const struct s2n_cipher_preferences cipher_preferences_kms_fips_tls_1_2_2018_10;
extern const struct s2n_cipher_preferences cipher_preferences_null;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_cipher_suites.c b/contrib/restricted/aws/s2n/tls/s2n_cipher_suites.c
index ea478e27b8..cf82b7c6f4 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_cipher_suites.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_cipher_suites.c
@@ -263,7 +263,7 @@ struct s2n_cipher_suite s2n_rsa_with_3des_ede_cbc_sha = /* 0x00,0x0A */ {
struct s2n_cipher_suite s2n_dhe_rsa_with_3des_ede_cbc_sha = /* 0x00,0x16 */ {
.available = 0,
- .name = "EDH-RSA-DES-CBC3-SHA",
+ .name = "DHE-RSA-DES-CBC3-SHA",
.iana_value = { TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA },
.key_exchange_alg = &s2n_dhe,
.auth_method = S2N_AUTHENTICATION_RSA,
@@ -994,6 +994,14 @@ const struct s2n_cipher_preferences cipher_preferences_test_all_tls13 = {
.suites = s2n_all_tls13_cipher_suites,
};
+static bool should_init_crypto = true;
+static bool crypto_initialized = false;
+int s2n_crypto_disable_init(void) {
+ POSIX_ENSURE(!crypto_initialized, S2N_ERR_INITIALIZED);
+ should_init_crypto = false;
+ return S2N_SUCCESS;
+}
+
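(Illustrative note, not part of the diff.) should_init_crypto lets an application that manages libcrypto itself skip the OpenSSL_add_all_algorithms / OPENSSL_init_crypto calls below. A minimal usage sketch, assuming s2n_crypto_disable_init() is exported through api/s2n.h in this version; the POSIX_ENSURE check above means it must run before initialization.

    #include "api/s2n.h"

    int example_init_without_touching_libcrypto(void)
    {
        /* must be called before s2n_init(); afterwards it fails with S2N_ERR_INITIALIZED */
        if (s2n_crypto_disable_init() != S2N_SUCCESS) {
            return -1;
        }
        return s2n_init();
    }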
/* Determines cipher suite availability and selects record algorithms */
int s2n_cipher_suites_init(void)
{
@@ -1026,7 +1034,7 @@ int s2n_cipher_suites_init(void)
if (cur_suite->sslv3_record_alg && cur_suite->sslv3_record_alg->cipher->is_available()) {
struct s2n_blob cur_suite_mem = { .data = (uint8_t *) cur_suite, .size = sizeof(struct s2n_cipher_suite) };
struct s2n_blob new_suite_mem = { 0 };
- GUARD(s2n_dup(&cur_suite_mem, &new_suite_mem));
+ POSIX_GUARD(s2n_dup(&cur_suite_mem, &new_suite_mem));
struct s2n_cipher_suite *new_suite = (struct s2n_cipher_suite *)(void *)new_suite_mem.data;
new_suite->available = 1;
@@ -1037,14 +1045,18 @@ int s2n_cipher_suites_init(void)
}
}
+ if (should_init_crypto) {
#if !S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0)
- /*https://wiki.openssl.org/index.php/Manual:OpenSSL_add_all_algorithms(3)*/
- OpenSSL_add_all_algorithms();
+ /*https://wiki.openssl.org/index.php/Manual:OpenSSL_add_all_algorithms(3)*/
+ OpenSSL_add_all_algorithms();
#else
- OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CRYPTO_STRINGS | OPENSSL_INIT_ADD_ALL_CIPHERS | OPENSSL_INIT_ADD_ALL_DIGESTS, NULL);
+ OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CRYPTO_STRINGS | OPENSSL_INIT_ADD_ALL_CIPHERS | OPENSSL_INIT_ADD_ALL_DIGESTS, NULL);
#endif
+ }
- return 0;
+ crypto_initialized = true;
+
+ return S2N_SUCCESS;
}
/* Reset any selected record algorithms */
@@ -1058,96 +1070,93 @@ int s2n_cipher_suites_cleanup(void)
/* Release custom SSLv3 cipher suites */
if (cur_suite->sslv3_cipher_suite != cur_suite) {
- GUARD(s2n_free_object((uint8_t **)&cur_suite->sslv3_cipher_suite, sizeof(struct s2n_cipher_suite)));
+ POSIX_GUARD(s2n_free_object((uint8_t **)&cur_suite->sslv3_cipher_suite, sizeof(struct s2n_cipher_suite)));
}
cur_suite->sslv3_cipher_suite = NULL;
}
+ if (should_init_crypto) {
#if !S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0)
- /*https://wiki.openssl.org/index.php/Manual:OpenSSL_add_all_algorithms(3)*/
- EVP_cleanup();
+ /*https://wiki.openssl.org/index.php/Manual:OpenSSL_add_all_algorithms(3)*/
+ EVP_cleanup();
- /* per the reqs here https://www.openssl.org/docs/man1.1.0/crypto/OPENSSL_init_crypto.html we don't explicitly call
- * cleanup in later versions */
+ /* per the reqs here https://www.openssl.org/docs/man1.1.0/crypto/OPENSSL_init_crypto.html we don't explicitly call
+ * cleanup in later versions */
#endif
+ }
return 0;
}
-struct s2n_cipher_suite *s2n_cipher_suite_from_wire(const uint8_t cipher_suite[S2N_TLS_CIPHER_SUITE_LEN])
+S2N_RESULT s2n_cipher_suite_from_iana(const uint8_t iana[static S2N_TLS_CIPHER_SUITE_LEN], struct s2n_cipher_suite **cipher_suite)
{
+ RESULT_ENSURE_REF(cipher_suite);
+ *cipher_suite = NULL;
+ RESULT_ENSURE_REF(iana);
+
int low = 0;
- int top = (sizeof(s2n_all_cipher_suites) / sizeof(struct s2n_cipher_suite *)) - 1;
+ int top = s2n_array_len(s2n_all_cipher_suites) - 1;
+
/* Perform a textbook binary search */
while (low <= top) {
/* Check in the middle */
- int mid = low + ((top - low) / 2);
- int m = memcmp(s2n_all_cipher_suites[mid]->iana_value, cipher_suite, 2);
+ size_t mid = low + ((top - low) / 2);
+ int m = memcmp(s2n_all_cipher_suites[mid]->iana_value, iana, S2N_TLS_CIPHER_SUITE_LEN);
if (m == 0) {
- return s2n_all_cipher_suites[mid];
+ *cipher_suite = s2n_all_cipher_suites[mid];
+ return S2N_RESULT_OK;
} else if (m > 0) {
top = mid - 1;
} else if (m < 0) {
low = mid + 1;
}
}
-
- return NULL;
+ RESULT_BAIL(S2N_ERR_CIPHER_NOT_SUPPORTED);
}
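(Illustrative note, not part of the diff.) The lookup now signals "not found" through S2N_RESULT instead of returning NULL, so callers guard the call rather than NULL-check the return value. A minimal sketch of the new calling convention, using the RESULT_* macros that appear elsewhere in this diff; the IANA byte pair is illustrative (0xC0 0x2F is ECDHE-RSA-AES128-GCM-SHA256).

    static S2N_RESULT example_lookup_suite(void)
    {
        const uint8_t iana[S2N_TLS_CIPHER_SUITE_LEN] = { 0xC0, 0x2F };
        struct s2n_cipher_suite *suite = NULL;
        RESULT_GUARD(s2n_cipher_suite_from_iana(iana, &suite));
        RESULT_ENSURE_REF(suite);
        return S2N_RESULT_OK;
    }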
int s2n_set_cipher_as_client(struct s2n_connection *conn, uint8_t wire[S2N_TLS_CIPHER_SUITE_LEN])
{
- notnull_check(conn);
- notnull_check(conn->secure.cipher_suite);
- struct s2n_cipher_suite *cipher_suite;
-
- /* See if the cipher is one we support */
- cipher_suite = s2n_cipher_suite_from_wire(wire);
- ENSURE_POSIX(cipher_suite != NULL, S2N_ERR_CIPHER_NOT_SUPPORTED);
-
- /* From RFC section: https://tools.ietf.org/html/rfc8446#section-4.2.11:
- * Client MUST verify that the server selected a cipher suite indicating a Hash
- * associated with the chosen PSK if it exists.
- * */
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
+
+ const struct s2n_security_policy *security_policy;
+ POSIX_GUARD(s2n_connection_get_security_policy(conn, &security_policy));
+ POSIX_ENSURE_REF(security_policy);
+
+ struct s2n_cipher_suite *cipher_suite = NULL;
+ for (size_t i = 0; i < security_policy->cipher_preferences->count; i++) {
+ const uint8_t *ours = security_policy->cipher_preferences->suites[i]->iana_value;
+ if (memcmp(wire, ours, S2N_TLS_CIPHER_SUITE_LEN) == 0) {
+ cipher_suite = security_policy->cipher_preferences->suites[i];
+ break;
+ }
+ }
+ POSIX_ENSURE(cipher_suite != NULL, S2N_ERR_CIPHER_NOT_SUPPORTED);
+ POSIX_ENSURE(cipher_suite->available, S2N_ERR_CIPHER_NOT_SUPPORTED);
+
+ /** Clients MUST verify
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.11
+ *# that the server selected a cipher suite
+ *# indicating a Hash associated with the PSK
+ **/
if (conn->psk_params.chosen_psk) {
- ENSURE_POSIX(cipher_suite->prf_alg == conn->psk_params.chosen_psk->hmac_alg,
+ POSIX_ENSURE(cipher_suite->prf_alg == conn->psk_params.chosen_psk->hmac_alg,
S2N_ERR_CIPHER_NOT_SUPPORTED);
}
/* Verify cipher suite sent in server hello is the same as sent in hello retry */
if (s2n_is_hello_retry_handshake(conn) && !s2n_is_hello_retry_message(conn)) {
- ENSURE_POSIX(conn->secure.cipher_suite->iana_value == cipher_suite->iana_value, S2N_ERR_CIPHER_NOT_SUPPORTED);
+ POSIX_ENSURE(conn->secure.cipher_suite->iana_value == cipher_suite->iana_value, S2N_ERR_CIPHER_NOT_SUPPORTED);
return S2N_SUCCESS;
}
- conn->secure.cipher_suite = cipher_suite;
-
- /* Verify the cipher was part of the originally offered list */
- const struct s2n_cipher_preferences *cipher_prefs;
- GUARD(s2n_connection_get_cipher_preferences(conn, &cipher_prefs));
-
- uint8_t found = 0;
-
- for (int i = 0; i < cipher_prefs->count; i++ ) {
- /* The client sends all "available" ciphers in the preference list to the server.
- The server must pick one of the ciphers offered by the client. */
- if (cipher_prefs->suites[i]->available) {
- const uint8_t *server_iana_value = conn->secure.cipher_suite->iana_value;
- const uint8_t *client_iana_value = cipher_prefs->suites[i]->iana_value;
-
- if (memcmp(server_iana_value, client_iana_value, S2N_TLS_CIPHER_SUITE_LEN) == 0) {
- found = 1;
- break;
- }
- }
- }
- S2N_ERROR_IF(found != 1, S2N_ERR_CIPHER_NOT_SUPPORTED);
+ conn->secure.cipher_suite = cipher_suite;
/* For SSLv3 use SSLv3-specific ciphers */
if (conn->actual_protocol_version == S2N_SSLv3) {
conn->secure.cipher_suite = conn->secure.cipher_suite->sslv3_cipher_suite;
- notnull_check(conn->secure.cipher_suite);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
}
return 0;
@@ -1155,7 +1164,7 @@ int s2n_set_cipher_as_client(struct s2n_connection *conn, uint8_t wire[S2N_TLS_C
static int s2n_wire_ciphers_contain(const uint8_t *match, const uint8_t *wire, uint32_t count, uint32_t cipher_suite_len)
{
- for (int i = 0; i < count; i++) {
+ for (uint32_t i = 0; i < count; i++) {
const uint8_t *theirs = wire + (i * cipher_suite_len) + (cipher_suite_len - S2N_TLS_CIPHER_SUITE_LEN);
if (!memcmp(match, theirs, S2N_TLS_CIPHER_SUITE_LEN)) {
@@ -1179,7 +1188,7 @@ static int s2n_set_cipher_as_server(struct s2n_connection *conn, uint8_t *wire,
uint8_t fallback_scsv[S2N_TLS_CIPHER_SUITE_LEN] = { TLS_FALLBACK_SCSV };
if (s2n_wire_ciphers_contain(fallback_scsv, wire, count, cipher_suite_len)) {
conn->closed = 1;
- S2N_ERROR(S2N_ERR_FALLBACK_DETECTED);
+ POSIX_BAIL(S2N_ERR_FALLBACK_DETECTED);
}
}
@@ -1189,7 +1198,7 @@ static int s2n_set_cipher_as_server(struct s2n_connection *conn, uint8_t *wire,
}
const struct s2n_security_policy *security_policy;
- GUARD(s2n_connection_get_security_policy(conn, &security_policy));
+ POSIX_GUARD(s2n_connection_get_security_policy(conn, &security_policy));
/* s2n supports only server order */
for (int i = 0; i < security_policy->cipher_preferences->count; i++) {
@@ -1197,7 +1206,7 @@ static int s2n_set_cipher_as_server(struct s2n_connection *conn, uint8_t *wire,
if (s2n_wire_ciphers_contain(ours, wire, count, cipher_suite_len)) {
/* We have a match */
- struct s2n_cipher_suite *match = s2n_cipher_suite_from_wire(ours);
+ struct s2n_cipher_suite *match = security_policy->cipher_preferences->suites[i];
/* Never use TLS1.3 ciphers on a pre-TLS1.3 connection, and vice versa */
if ((conn->actual_protocol_version >= S2N_TLS13) != (match->minimum_required_tls_version >= S2N_TLS13)) {
@@ -1223,7 +1232,7 @@ static int s2n_set_cipher_as_server(struct s2n_connection *conn, uint8_t *wire,
if (match->minimum_required_tls_version < S2N_TLS13) {
/* If the kex is not supported continue to the next candidate */
bool kex_supported = false;
- GUARD_AS_POSIX(s2n_kex_supported(match, conn, &kex_supported));
+ POSIX_GUARD_RESULT(s2n_kex_supported(match, conn, &kex_supported));
if (!kex_supported) {
continue;
}
@@ -1264,7 +1273,7 @@ static int s2n_set_cipher_as_server(struct s2n_connection *conn, uint8_t *wire,
return S2N_SUCCESS;
}
- S2N_ERROR(S2N_ERR_CIPHER_NOT_SUPPORTED);
+ POSIX_BAIL(S2N_ERR_CIPHER_NOT_SUPPORTED);
}
int s2n_set_cipher_as_sslv2_server(struct s2n_connection *conn, uint8_t *wire, uint16_t count)
@@ -1276,3 +1285,34 @@ int s2n_set_cipher_as_tls_server(struct s2n_connection *conn, uint8_t *wire, uin
{
return s2n_set_cipher_as_server(conn, wire, count, S2N_TLS_CIPHER_SUITE_LEN);
}
+
+bool s2n_cipher_suite_requires_ecc_extension(struct s2n_cipher_suite *cipher)
+{
+ if(!cipher) {
+ return false;
+ }
+
+ /* TLS1.3 does not include key exchange algorithms in its cipher suites,
+ * but the elliptic curves extension is always required. */
+ if (cipher->minimum_required_tls_version >= S2N_TLS13) {
+ return true;
+ }
+
+ if (s2n_kex_includes(cipher->key_exchange_alg, &s2n_ecdhe)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool s2n_cipher_suite_requires_pq_extension(struct s2n_cipher_suite *cipher)
+{
+ if(!cipher) {
+ return false;
+ }
+
+ if (s2n_kex_includes(cipher->key_exchange_alg, &s2n_kem)) {
+ return true;
+ }
+ return false;
+}
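(Illustrative note, not part of the diff.) A sketch of how these predicates are meant to be consumed, for example by an extension's should_send logic; the function name is hypothetical, and conn->secure.cipher_suite is the negotiated suite as used throughout this diff.

    static bool example_should_send_ec_extension(struct s2n_connection *conn)
    {
        return conn != NULL && s2n_cipher_suite_requires_ecc_extension(conn->secure.cipher_suite);
    }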
diff --git a/contrib/restricted/aws/s2n/tls/s2n_cipher_suites.h b/contrib/restricted/aws/s2n/tls/s2n_cipher_suites.h
index 2bee39ec4c..74a388a9b4 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_cipher_suites.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_cipher_suites.h
@@ -32,10 +32,9 @@
#define S2N_MAX_POSSIBLE_RECORD_ALGS 2
-/* Kept up-to-date by s2n_cipher_suite_match_test */
+/* Kept up-to-date by s2n_cipher_suite_test */
#define S2N_CIPHER_SUITE_COUNT 39
-
/* Record algorithm flags that can be OR'ed */
#define S2N_TLS12_AES_GCM_AEAD_NONCE 0x01
#define S2N_TLS12_CHACHA_POLY_AEAD_NONCE 0x02
@@ -84,6 +83,8 @@ extern const struct s2n_record_algorithm s2n_record_alg_aes256_sha384;
extern const struct s2n_record_algorithm s2n_record_alg_aes128_gcm;
extern const struct s2n_record_algorithm s2n_record_alg_aes256_gcm;
extern const struct s2n_record_algorithm s2n_record_alg_chacha20_poly1305;
+extern const struct s2n_record_algorithm s2n_tls13_record_alg_aes128_gcm;
+extern const struct s2n_record_algorithm s2n_tls13_record_alg_chacha20_poly1305;
struct s2n_cipher_suite {
/* Is there an implementation available? Set in s2n_cipher_suites_init() */
@@ -161,7 +162,9 @@ extern struct s2n_cipher_suite s2n_tls13_chacha20_poly1305_sha256;
extern int s2n_cipher_suites_init(void);
extern int s2n_cipher_suites_cleanup(void);
-extern struct s2n_cipher_suite *s2n_cipher_suite_from_wire(const uint8_t cipher_suite[S2N_TLS_CIPHER_SUITE_LEN]);
+S2N_RESULT s2n_cipher_suite_from_iana(const uint8_t iana[S2N_TLS_CIPHER_SUITE_LEN], struct s2n_cipher_suite **cipher_suite);
extern int s2n_set_cipher_as_client(struct s2n_connection *conn, uint8_t wire[S2N_TLS_CIPHER_SUITE_LEN]);
extern int s2n_set_cipher_as_sslv2_server(struct s2n_connection *conn, uint8_t * wire, uint16_t count);
extern int s2n_set_cipher_as_tls_server(struct s2n_connection *conn, uint8_t * wire, uint16_t count);
+bool s2n_cipher_suite_requires_ecc_extension(struct s2n_cipher_suite *cipher);
+bool s2n_cipher_suite_requires_pq_extension(struct s2n_cipher_suite *cipher);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_client_cert.c b/contrib/restricted/aws/s2n/tls/s2n_client_cert.c
index 7dde226788..757b13430d 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_client_cert.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_client_cert.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "crypto/s2n_certificate.h"
#include "error/s2n_errno.h"
@@ -27,32 +27,97 @@
#include "utils/s2n_blob.h"
#include "utils/s2n_safety.h"
+/* In TLS1.2, the certificate list is just an opaque vector of certificates:
+ *
+ * opaque ASN.1Cert<1..2^24-1>;
+ *
+ * struct {
+ * ASN.1Cert certificate_list<0..2^24-1>;
+ * } Certificate;
+ *
+ * This construction allowed us to store the entire certificate_list blob
+ * and return it from the s2n_connection_get_client_cert_chain method for
+ * customers to examine.
+ *
+ * However, TLS1.3 introduced per-certificate extensions:
+ *
+ * struct {
+ * opaque cert_data<1..2^24-1>;
+ * ----> Extension extensions<0..2^16-1>; <----
+ * } CertificateEntry;
+ *
+ * struct {
+ * opaque certificate_request_context<0..2^8-1>;
+ * CertificateEntry certificate_list<0..2^24-1>;
+ * } Certificate;
+ *
+ * So in order to store / return the certificates in the same format as in TLS1.2,
+ * we need to first strip out the extensions.
+ */
+static S2N_RESULT s2n_client_cert_chain_store(struct s2n_connection *conn, struct s2n_blob *client_cert_chain)
+{
+ RESULT_ENSURE_REF(conn);
+
+ /* Earlier versions are a basic copy */
+ if (conn->actual_protocol_version < S2N_TLS13) {
+ RESULT_GUARD_POSIX(s2n_dup(client_cert_chain, &conn->handshake_params.client_cert_chain));
+ return S2N_RESULT_OK;
+ }
+
+ struct s2n_stuffer cert_chain_in = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&cert_chain_in, client_cert_chain));
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_write(&cert_chain_in, client_cert_chain->size));
+
+ struct s2n_stuffer cert_chain_out = { 0 };
+ RESULT_GUARD_POSIX(s2n_realloc(&conn->handshake_params.client_cert_chain, client_cert_chain->size));
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&cert_chain_out, &conn->handshake_params.client_cert_chain));
+
+ uint32_t cert_size = 0;
+ uint16_t extensions_size = 0;
+ while(s2n_stuffer_data_available(&cert_chain_in)) {
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint24(&cert_chain_in, &cert_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint24(&cert_chain_out, cert_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_copy(&cert_chain_in, &cert_chain_out, cert_size));
+
+ /* The new TLS1.3 format includes extensions, which we must skip.
+ * Customers will not expect TLS extensions in a DER-encoded certificate.
+ */
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint16(&cert_chain_in, &extensions_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_read(&cert_chain_in, extensions_size));
+ }
+
+ /* We will have allocated more memory than actually necessary.
+ * If this becomes a problem, we should consider reallocing the correct amount of memory here.
+ */
+ conn->handshake_params.client_cert_chain.size = s2n_stuffer_data_available(&cert_chain_out);
+ return S2N_RESULT_OK;
+}
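(Illustrative note, not part of the diff.) A worked example of the conversion above, with made-up bytes: each TLS1.3 CertificateEntry is copied minus its extensions block, so the stored chain matches the TLS1.2 layout that s2n_connection_get_client_cert_chain callers already expect.

    /* Illustrative bytes only:
     *   TLS1.3 input entry:    00 00 03  AA BB CC  00 00
     *                          ^cert len ^cert     ^extensions len = 0, skipped
     *   TLS1.2-style output:   00 00 03  AA BB CC
     * The loop repeats per entry until cert_chain_in is drained, then the output
     * size is trimmed to s2n_stuffer_data_available(&cert_chain_out). */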
int s2n_client_cert_recv(struct s2n_connection *conn)
{
if (conn->actual_protocol_version == S2N_TLS13) {
uint8_t certificate_request_context_len;
- GUARD(s2n_stuffer_read_uint8(&conn->handshake.io, &certificate_request_context_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(&conn->handshake.io, &certificate_request_context_len));
S2N_ERROR_IF(certificate_request_context_len != 0,S2N_ERR_BAD_MESSAGE);
}
struct s2n_stuffer *in = &conn->handshake.io;
struct s2n_blob client_cert_chain = {0};
- GUARD(s2n_stuffer_read_uint24(in, &client_cert_chain.size));
+ POSIX_GUARD(s2n_stuffer_read_uint24(in, &client_cert_chain.size));
S2N_ERROR_IF(client_cert_chain.size > s2n_stuffer_data_available(in), S2N_ERR_BAD_MESSAGE);
if (client_cert_chain.size == 0) {
- GUARD(s2n_conn_set_handshake_no_client_cert(conn));
+ POSIX_GUARD(s2n_conn_set_handshake_no_client_cert(conn));
return 0;
}
client_cert_chain.data = s2n_stuffer_raw_read(in, client_cert_chain.size);
- notnull_check(client_cert_chain.data);
+ POSIX_ENSURE_REF(client_cert_chain.data);
s2n_cert_public_key public_key;
- GUARD(s2n_pkey_zero_init(&public_key));
+ POSIX_GUARD(s2n_pkey_zero_init(&public_key));
s2n_pkey_type pkey_type;
@@ -61,14 +126,14 @@ int s2n_client_cert_recv(struct s2n_connection *conn)
client_cert_chain.data, client_cert_chain.size,
&pkey_type, &public_key) != S2N_CERT_OK, S2N_ERR_CERT_UNTRUSTED);
- conn->secure.client_cert_pkey_type = pkey_type;
- GUARD(s2n_pkey_setup_for_type(&public_key, pkey_type));
-
- GUARD(s2n_pkey_check_key_exists(&public_key));
- GUARD(s2n_dup(&client_cert_chain, &conn->secure.client_cert_chain));
- conn->secure.client_public_key = public_key;
+ conn->handshake_params.client_cert_pkey_type = pkey_type;
+ POSIX_GUARD(s2n_pkey_setup_for_type(&public_key, pkey_type));
- return 0;
+ POSIX_GUARD(s2n_pkey_check_key_exists(&public_key));
+ POSIX_GUARD_RESULT(s2n_client_cert_chain_store(conn, &client_cert_chain));
+ conn->handshake_params.client_public_key = public_key;
+
+ return S2N_SUCCESS;
}
@@ -76,7 +141,7 @@ int s2n_client_cert_send(struct s2n_connection *conn)
{
struct s2n_cert_chain_and_key *chain_and_key = conn->handshake_params.our_chain_and_key;
- if (conn->actual_protocol_version == S2N_TLS13) {
+ if (conn->actual_protocol_version >= S2N_TLS13) {
/* If this message is in response to a CertificateRequest, the value of
* certificate_request_context in that message.
* https://tools.ietf.org/html/rfc8446#section-4.4.2
@@ -85,15 +150,15 @@ int s2n_client_cert_send(struct s2n_connection *conn)
* https://tools.ietf.org/html/rfc8446#section-4.3.2
*/
uint8_t certificate_request_context_len = 0;
- GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, certificate_request_context_len));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, certificate_request_context_len));
}
if (chain_and_key == NULL) {
- GUARD(s2n_conn_set_handshake_no_client_cert(conn));
- GUARD(s2n_send_empty_cert_chain(&conn->handshake.io));
+ POSIX_GUARD(s2n_conn_set_handshake_no_client_cert(conn));
+ POSIX_GUARD(s2n_send_empty_cert_chain(&conn->handshake.io));
return 0;
}
- GUARD(s2n_send_cert_chain(conn, &conn->handshake.io, chain_and_key));
- return 0;
+ POSIX_GUARD(s2n_send_cert_chain(conn, &conn->handshake.io, chain_and_key));
+ return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_client_cert_verify.c b/contrib/restricted/aws/s2n/tls/s2n_client_cert_verify.c
index b11150c48c..5327b5bc7e 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_client_cert_verify.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_client_cert_verify.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
@@ -25,68 +25,78 @@
#include "stuffer/s2n_stuffer.h"
#include "utils/s2n_safety.h"
+#include "tls/s2n_async_pkey.h"
+static int s2n_client_cert_verify_send_complete(struct s2n_connection *conn, struct s2n_blob *signature);
int s2n_client_cert_verify_recv(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
+ struct s2n_handshake_hashes *hashes = conn->handshake.hashes;
+ POSIX_ENSURE_REF(hashes);
+
struct s2n_stuffer *in = &conn->handshake.io;
- struct s2n_signature_scheme chosen_sig_scheme = s2n_rsa_pkcs1_md5_sha1;
+ struct s2n_signature_scheme *chosen_sig_scheme = &conn->handshake_params.client_cert_sig_scheme;
- if(conn->actual_protocol_version >= S2N_TLS12){
+ if (conn->actual_protocol_version < S2N_TLS12) {
+ POSIX_GUARD(s2n_choose_default_sig_scheme(conn, chosen_sig_scheme, S2N_CLIENT));
+ } else {
/* Verify the SigScheme picked by the Client was in the preference list we sent (or is the default SigScheme) */
- GUARD(s2n_get_and_validate_negotiated_signature_scheme(conn, in, &chosen_sig_scheme));
+ POSIX_GUARD(s2n_get_and_validate_negotiated_signature_scheme(conn, in, chosen_sig_scheme));
}
+
uint16_t signature_size;
struct s2n_blob signature = {0};
- GUARD(s2n_stuffer_read_uint16(in, &signature_size));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &signature_size));
signature.size = signature_size;
signature.data = s2n_stuffer_raw_read(in, signature.size);
- notnull_check(signature.data);
+ POSIX_ENSURE_REF(signature.data);
/* Use a copy of the hash state since the verify digest computation may modify the running hash state we need later. */
- struct s2n_hash_state hash_state = {0};
- GUARD(s2n_handshake_get_hash_state(conn, chosen_sig_scheme.hash_alg, &hash_state));
- GUARD(s2n_hash_copy(&conn->handshake.ccv_hash_copy, &hash_state));
+ struct s2n_hash_state *hash_state = &hashes->hash_workspace;
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, chosen_sig_scheme->hash_alg, hash_state));
/* Verify the signature */
- GUARD(s2n_pkey_verify(&conn->secure.client_public_key, chosen_sig_scheme.sig_alg, &conn->handshake.ccv_hash_copy, &signature));
+ POSIX_GUARD(s2n_pkey_verify(&conn->handshake_params.client_public_key, chosen_sig_scheme->sig_alg, hash_state, &signature));
/* Client certificate has been verified. Minimize required handshake hash algs */
- GUARD(s2n_conn_update_required_handshake_hashes(conn));
+ POSIX_GUARD(s2n_conn_update_required_handshake_hashes(conn));
- return 0;
+ return S2N_SUCCESS;
}
int s2n_client_cert_verify_send(struct s2n_connection *conn)
{
- struct s2n_stuffer *out = &conn->handshake.io;
+ POSIX_ENSURE_REF(conn);
+ struct s2n_handshake_hashes *hashes = conn->handshake.hashes;
+ POSIX_ENSURE_REF(hashes);
- struct s2n_signature_scheme chosen_sig_scheme = s2n_rsa_pkcs1_md5_sha1;
+ S2N_ASYNC_PKEY_GUARD(conn);
+ struct s2n_stuffer *out = &conn->handshake.io;
- if (conn->actual_protocol_version >= S2N_TLS12) {
- chosen_sig_scheme = conn->secure.client_cert_sig_scheme;
- GUARD(s2n_stuffer_write_uint16(out, conn->secure.client_cert_sig_scheme.iana_value));
+ struct s2n_signature_scheme *chosen_sig_scheme = &conn->handshake_params.client_cert_sig_scheme;
+ if (conn->actual_protocol_version < S2N_TLS12) {
+ POSIX_GUARD(s2n_choose_default_sig_scheme(conn, chosen_sig_scheme, S2N_CLIENT));
+ } else {
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, conn->handshake_params.client_cert_sig_scheme.iana_value));
}
/* Use a copy of the hash state since the verify digest computation may modify the running hash state we need later. */
- struct s2n_hash_state hash_state = {0};
- GUARD(s2n_handshake_get_hash_state(conn, chosen_sig_scheme.hash_alg, &hash_state));
- GUARD(s2n_hash_copy(&conn->handshake.ccv_hash_copy, &hash_state));
+ struct s2n_hash_state *hash_state = &hashes->hash_workspace;
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, chosen_sig_scheme->hash_alg, hash_state));
- struct s2n_cert_chain_and_key *cert_chain_and_key = conn->handshake_params.our_chain_and_key;
-
- DEFER_CLEANUP(struct s2n_blob signature = {0}, s2n_free);
- uint32_t max_signature_size = 0;
- GUARD_AS_POSIX(s2n_pkey_size(cert_chain_and_key->private_key, &max_signature_size));
- GUARD(s2n_alloc(&signature, max_signature_size));
+ S2N_ASYNC_PKEY_SIGN(conn, chosen_sig_scheme->sig_alg, hash_state, s2n_client_cert_verify_send_complete);
+}
- GUARD(s2n_pkey_sign(cert_chain_and_key->private_key, chosen_sig_scheme.sig_alg, &conn->handshake.ccv_hash_copy, &signature));
+static int s2n_client_cert_verify_send_complete(struct s2n_connection *conn, struct s2n_blob *signature)
+{
+ struct s2n_stuffer *out = &conn->handshake.io;
- GUARD(s2n_stuffer_write_uint16(out, signature.size));
- GUARD(s2n_stuffer_write(out, &signature));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, signature->size));
+ POSIX_GUARD(s2n_stuffer_write(out, signature));
/* Client certificate has been verified. Minimize required handshake hash algs */
- GUARD(s2n_conn_update_required_handshake_hashes(conn));
+ POSIX_GUARD(s2n_conn_update_required_handshake_hashes(conn));
- return 0;
+ return S2N_SUCCESS;
}
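(Illustrative note, not part of the diff.) With S2N_ASYNC_PKEY_GUARD / S2N_ASYNC_PKEY_SIGN, the CertificateVerify signature can now be produced outside the handshake, and s2n_client_cert_verify_send_complete writes the result once it is ready. A minimal application-side sketch, assuming the standard public async private-key API (s2n_config_set_async_pkey_callback, s2n_async_pkey_op_perform, s2n_async_pkey_op_apply); error handling trimmed.

    static struct s2n_async_pkey_op *pending_op = NULL;

    static int example_async_pkey_cb(struct s2n_connection *conn, struct s2n_async_pkey_op *op)
    {
        pending_op = op;   /* a real application might hand this to a signing service */
        return S2N_SUCCESS;
    }

    static int example_finish_pending_sign(struct s2n_connection *conn, s2n_cert_private_key *key)
    {
        if (s2n_async_pkey_op_perform(pending_op, key) != S2N_SUCCESS) { return -1; }
        if (s2n_async_pkey_op_apply(pending_op, conn) != S2N_SUCCESS) { return -1; }
        return s2n_async_pkey_op_free(pending_op);
    }

    /* registered once: s2n_config_set_async_pkey_callback(config, example_async_pkey_cb);
     * s2n_negotiate() reports itself blocked until the pending op is applied. */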
diff --git a/contrib/restricted/aws/s2n/tls/s2n_client_finished.c b/contrib/restricted/aws/s2n/tls/s2n_client_finished.c
index cc85970145..871e7db15f 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_client_finished.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_client_finished.c
@@ -30,7 +30,7 @@ int s2n_client_finished_recv(struct s2n_connection *conn)
uint8_t *our_version;
our_version = conn->handshake.client_finished;
uint8_t *their_version = s2n_stuffer_raw_read(&conn->handshake.io, S2N_TLS_FINISHED_LEN);
- notnull_check(their_version);
+ POSIX_ENSURE_REF(their_version);
S2N_ERROR_IF(!s2n_constant_time_equals(our_version, their_version, S2N_TLS_FINISHED_LEN) || conn->handshake.rsa_failed, S2N_ERR_BAD_MESSAGE);
@@ -40,25 +40,25 @@ int s2n_client_finished_recv(struct s2n_connection *conn)
int s2n_client_finished_send(struct s2n_connection *conn)
{
uint8_t *our_version;
- GUARD(s2n_prf_client_finished(conn));
+ POSIX_GUARD(s2n_prf_client_finished(conn));
struct s2n_blob seq = {.data = conn->secure.client_sequence_number,.size = sizeof(conn->secure.client_sequence_number) };
- GUARD(s2n_blob_zero(&seq));
+ POSIX_GUARD(s2n_blob_zero(&seq));
our_version = conn->handshake.client_finished;
/* Update the server to use the cipher suite */
conn->client = &conn->secure;
if (conn->actual_protocol_version == S2N_SSLv3) {
- GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, our_version, S2N_SSL_FINISHED_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, our_version, S2N_SSL_FINISHED_LEN));
} else {
- GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, our_version, S2N_TLS_FINISHED_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, our_version, S2N_TLS_FINISHED_LEN));
}
return 0;
}
int s2n_tls13_client_finished_recv(struct s2n_connection *conn) {
- eq_check(conn->actual_protocol_version, S2N_TLS13);
+ POSIX_ENSURE_EQ(conn->actual_protocol_version, S2N_TLS13);
uint8_t length = s2n_stuffer_data_available(&conn->handshake.io);
S2N_ERROR_IF(length == 0, S2N_ERR_BAD_MESSAGE);
@@ -71,40 +71,42 @@ int s2n_tls13_client_finished_recv(struct s2n_connection *conn) {
s2n_tls13_connection_keys(keys, conn);
/* get transcript hash */
- struct s2n_hash_state hash_state = {0};
- GUARD(s2n_handshake_get_hash_state(conn, keys.hash_algorithm, &hash_state));
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+ struct s2n_hash_state *hash_state = &conn->handshake.hashes->hash_workspace;
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, keys.hash_algorithm, hash_state));
struct s2n_blob finished_key = {0};
- GUARD(s2n_blob_init(&finished_key, conn->handshake.client_finished, keys.size));
+ POSIX_GUARD(s2n_blob_init(&finished_key, conn->handshake.client_finished, keys.size));
s2n_tls13_key_blob(client_finished_mac, keys.size);
- GUARD(s2n_tls13_calculate_finished_mac(&keys, &finished_key, &hash_state, &client_finished_mac));
+ POSIX_GUARD(s2n_tls13_calculate_finished_mac(&keys, &finished_key, hash_state, &client_finished_mac));
- GUARD(s2n_tls13_mac_verify(&keys, &client_finished_mac, &wire_finished_mac));
+ POSIX_GUARD(s2n_tls13_mac_verify(&keys, &client_finished_mac, &wire_finished_mac));
return 0;
}
int s2n_tls13_client_finished_send(struct s2n_connection *conn) {
- eq_check(conn->actual_protocol_version, S2N_TLS13);
+ POSIX_ENSURE_EQ(conn->actual_protocol_version, S2N_TLS13);
/* get tls13 keys */
s2n_tls13_connection_keys(keys, conn);
/* get transcript hash */
- struct s2n_hash_state hash_state = {0};
- GUARD(s2n_handshake_get_hash_state(conn, keys.hash_algorithm, &hash_state));
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+ struct s2n_hash_state *hash_state = &conn->handshake.hashes->hash_workspace;
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, keys.hash_algorithm, hash_state));
/* look up finished secret key */
struct s2n_blob finished_key = {0};
- GUARD(s2n_blob_init(&finished_key, conn->handshake.client_finished, keys.size));
+ POSIX_GUARD(s2n_blob_init(&finished_key, conn->handshake.client_finished, keys.size));
/* generate the hashed message authenticated code */
s2n_stack_blob(client_finished_mac, keys.size, S2N_TLS13_SECRET_MAX_LEN);
- GUARD(s2n_tls13_calculate_finished_mac(&keys, &finished_key, &hash_state, &client_finished_mac));
+ POSIX_GUARD(s2n_tls13_calculate_finished_mac(&keys, &finished_key, hash_state, &client_finished_mac));
/* write to handshake io */
- GUARD(s2n_stuffer_write(&conn->handshake.io, &client_finished_mac));
+ POSIX_GUARD(s2n_stuffer_write(&conn->handshake.io, &client_finished_mac));
return 0;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_client_hello.c b/contrib/restricted/aws/s2n/tls/s2n_client_hello.c
index b03cda1491..0bcec3501a 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_client_hello.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_client_hello.c
@@ -21,8 +21,8 @@
#include "crypto/s2n_fips.h"
#include "error/s2n_errno.h"
-
#include "crypto/s2n_hash.h"
+#include "crypto/s2n_rsa_signing.h"
#include "tls/extensions/s2n_extension_list.h"
#include "tls/extensions/s2n_server_key_share.h"
@@ -33,9 +33,9 @@
#include "tls/s2n_connection.h"
#include "tls/s2n_client_hello.h"
#include "tls/s2n_alerts.h"
+#include "tls/s2n_handshake_type.h"
#include "tls/s2n_signature_algorithms.h"
#include "tls/s2n_tls.h"
-#include "tls/s2n_tls_digest_preferences.h"
#include "tls/s2n_security_policies.h"
#include "stuffer/s2n_stuffer.h"
@@ -45,7 +45,7 @@
#include "utils/s2n_safety.h"
struct s2n_client_hello *s2n_connection_get_client_hello(struct s2n_connection *conn) {
- if (conn->client_hello.parsed != 1) {
+ if (conn->client_hello.callback_invoked != 1) {
return NULL;
}
@@ -58,8 +58,8 @@ static uint32_t min_size(struct s2n_blob *blob, uint32_t max_length) {
static S2N_RESULT s2n_generate_client_session_id(struct s2n_connection *conn)
{
- ENSURE_REF(conn);
- ENSURE_REF(conn->config);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->config);
/* Session id already generated - no-op */
if (conn->session_id_len) {
@@ -71,115 +71,135 @@ static S2N_RESULT s2n_generate_client_session_id(struct s2n_connection *conn)
return S2N_RESULT_OK;
}
- /* Generate the session id for TLS1.3 if in middlebox compatibility mode.
- * For now, we default to middlebox compatibility mode unless using QUIC. */
- if (conn->config->quic_enabled) {
+ /* Only generate the session id for TLS1.3 if in middlebox compatibility mode */
+ if (conn->client_protocol_version >= S2N_TLS13 && !s2n_is_middlebox_compat_enabled(conn)) {
return S2N_RESULT_OK;
}
struct s2n_blob session_id = {0};
- GUARD_AS_RESULT(s2n_blob_init(&session_id, conn->session_id, S2N_TLS_SESSION_ID_MAX_LEN));
- GUARD_RESULT(s2n_get_public_random_data(&session_id));
+ RESULT_GUARD_POSIX(s2n_blob_init(&session_id, conn->session_id, S2N_TLS_SESSION_ID_MAX_LEN));
+ RESULT_GUARD(s2n_get_public_random_data(&session_id));
conn->session_id_len = S2N_TLS_SESSION_ID_MAX_LEN;
return S2N_RESULT_OK;
}
ssize_t s2n_client_hello_get_raw_message_length(struct s2n_client_hello *ch) {
- notnull_check(ch);
+ POSIX_ENSURE_REF(ch);
return ch->raw_message.blob.size;
}
ssize_t s2n_client_hello_get_raw_message(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length)
{
- notnull_check(ch);
- notnull_check(out);
+ POSIX_ENSURE_REF(ch);
+ POSIX_ENSURE_REF(out);
uint32_t len = min_size(&ch->raw_message.blob, max_length);
struct s2n_stuffer *raw_message = &ch->raw_message;
- GUARD(s2n_stuffer_reread(raw_message));
- GUARD(s2n_stuffer_read_bytes(raw_message, out, len));
+ POSIX_GUARD(s2n_stuffer_reread(raw_message));
+ POSIX_GUARD(s2n_stuffer_read_bytes(raw_message, out, len));
return len;
}
ssize_t s2n_client_hello_get_cipher_suites_length(struct s2n_client_hello *ch) {
- notnull_check(ch);
+ POSIX_ENSURE_REF(ch);
return ch->cipher_suites.size;
}
+int s2n_client_hello_cb_done(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->config);
+ POSIX_ENSURE(conn->config->client_hello_cb_mode ==
+ S2N_CLIENT_HELLO_CB_NONBLOCKING, S2N_ERR_INVALID_STATE);
+ POSIX_ENSURE(conn->client_hello.callback_invoked == 1, S2N_ERR_ASYNC_NOT_PERFORMED);
+ POSIX_ENSURE(conn->client_hello.parsed == 1, S2N_ERR_INVALID_STATE);
+
+ conn->client_hello.callback_async_blocked = 0;
+ conn->client_hello.callback_async_done = 1;
+
+ return S2N_SUCCESS;
+}
+
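(Illustrative note, not part of the diff.) s2n_client_hello_cb_done is the application's way of finishing a non-blocking ClientHello callback: it only succeeds in S2N_CLIENT_HELLO_CB_NONBLOCKING mode, after the hello has been parsed and the callback invoked. A minimal sketch of the intended flow, assuming the public callback-mode APIs (s2n_config_set_client_hello_cb_mode, s2n_config_set_client_hello_cb).

    static int example_client_hello_cb(struct s2n_connection *conn, void *ctx)
    {
        /* Return success without completing; the handshake stays blocked
         * until s2n_client_hello_cb_done(conn) is called. */
        return S2N_SUCCESS;
    }

    /* setup:  s2n_config_set_client_hello_cb_mode(config, S2N_CLIENT_HELLO_CB_NONBLOCKING);
     *         s2n_config_set_client_hello_cb(config, example_client_hello_cb, NULL);
     * later:  s2n_client_hello_cb_done(conn);  then retry s2n_negotiate(). */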
ssize_t s2n_client_hello_get_cipher_suites(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length)
{
- notnull_check(ch);
- notnull_check(out);
- notnull_check(ch->cipher_suites.data);
+ POSIX_ENSURE_REF(ch);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(ch->cipher_suites.data);
uint32_t len = min_size(&ch->cipher_suites, max_length);
- memcpy_check(out, &ch->cipher_suites.data, len);
+ POSIX_CHECKED_MEMCPY(out, ch->cipher_suites.data, len);
return len;
}
ssize_t s2n_client_hello_get_extensions_length(struct s2n_client_hello *ch) {
- notnull_check(ch);
+ POSIX_ENSURE_REF(ch);
return ch->extensions.raw.size;
}
ssize_t s2n_client_hello_get_extensions(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length)
{
- notnull_check(ch);
- notnull_check(out);
- notnull_check(ch->extensions.raw.data);
+ POSIX_ENSURE_REF(ch);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(ch->extensions.raw.data);
uint32_t len = min_size(&ch->extensions.raw, max_length);
- memcpy_check(out, &ch->extensions.raw.data, len);
+ POSIX_CHECKED_MEMCPY(out, ch->extensions.raw.data, len);
return len;
}
int s2n_client_hello_free(struct s2n_client_hello *client_hello)
{
- notnull_check(client_hello);
+ POSIX_ENSURE_REF(client_hello);
- GUARD(s2n_stuffer_free(&client_hello->raw_message));
+ POSIX_GUARD(s2n_stuffer_free(&client_hello->raw_message));
/* These point to data in the raw_message stuffer,
so we don't need to free them */
client_hello->cipher_suites.data = NULL;
client_hello->extensions.raw.data = NULL;
+ /* clean the CH nonblocking callback flags
+ * in case we are preparing for CH retry */
+ client_hello->callback_async_blocked = 0;
+ client_hello->callback_async_done = 0;
+ client_hello->parsed = 0;
+
return 0;
}
int s2n_collect_client_hello(struct s2n_connection *conn, struct s2n_stuffer *source)
{
- notnull_check(conn);
- notnull_check(source);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(source);
uint32_t size = s2n_stuffer_data_available(source);
S2N_ERROR_IF(size == 0, S2N_ERR_BAD_MESSAGE);
struct s2n_client_hello *ch = &conn->client_hello;
- GUARD(s2n_stuffer_resize(&ch->raw_message, size));
- GUARD(s2n_stuffer_copy(source, &ch->raw_message, size));
+ POSIX_GUARD(s2n_stuffer_resize(&ch->raw_message, size));
+ POSIX_GUARD(s2n_stuffer_copy(source, &ch->raw_message, size));
return 0;
}
-static int s2n_parse_client_hello(struct s2n_connection *conn)
+int s2n_parse_client_hello(struct s2n_connection *conn)
{
- notnull_check(conn);
- GUARD(s2n_collect_client_hello(conn, &conn->handshake.io));
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD(s2n_collect_client_hello(conn, &conn->handshake.io));
if (conn->client_hello_version == S2N_SSLv2) {
- GUARD(s2n_sslv2_client_hello_recv(conn));
+ POSIX_GUARD(s2n_sslv2_client_hello_recv(conn));
return S2N_SUCCESS;
}
@@ -189,8 +209,8 @@ static int s2n_parse_client_hello(struct s2n_connection *conn)
uint8_t client_protocol_version[S2N_TLS_PROTOCOL_VERSION_LEN];
- GUARD(s2n_stuffer_read_bytes(in, client_protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
- GUARD(s2n_stuffer_erase_and_read_bytes(in, conn->secure.client_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, client_protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_erase_and_read_bytes(in, conn->handshake_params.client_random, S2N_TLS_RANDOM_DATA_LEN));
/* Protocol version in the ClientHello is fixed at 0x0303(TLS 1.2) for
* future versions of TLS. Therefore, we will negotiate down if a client sends
@@ -199,51 +219,58 @@ static int s2n_parse_client_hello(struct s2n_connection *conn)
conn->client_protocol_version = MIN((client_protocol_version[0] * 10) + client_protocol_version[1], S2N_TLS12);
conn->client_hello_version = conn->client_protocol_version;
- GUARD(s2n_stuffer_read_uint8(in, &conn->session_id_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &conn->session_id_len));
S2N_ERROR_IF(conn->session_id_len > S2N_TLS_SESSION_ID_MAX_LEN || conn->session_id_len > s2n_stuffer_data_available(in), S2N_ERR_BAD_MESSAGE);
-
- GUARD(s2n_stuffer_read_bytes(in, conn->session_id, conn->session_id_len));
+ POSIX_GUARD(s2n_blob_init(&client_hello->session_id, s2n_stuffer_raw_read(in, conn->session_id_len), conn->session_id_len));
+ POSIX_CHECKED_MEMCPY(conn->session_id, client_hello->session_id.data, conn->session_id_len);
uint16_t cipher_suites_length = 0;
- GUARD(s2n_stuffer_read_uint16(in, &cipher_suites_length));
- ENSURE_POSIX(cipher_suites_length > 0, S2N_ERR_BAD_MESSAGE);
- ENSURE_POSIX(cipher_suites_length % S2N_TLS_CIPHER_SUITE_LEN == 0, S2N_ERR_BAD_MESSAGE);
-
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &cipher_suites_length));
+ POSIX_ENSURE(cipher_suites_length > 0, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(cipher_suites_length % S2N_TLS_CIPHER_SUITE_LEN == 0, S2N_ERR_BAD_MESSAGE);
+
client_hello->cipher_suites.size = cipher_suites_length;
client_hello->cipher_suites.data = s2n_stuffer_raw_read(in, cipher_suites_length);
- notnull_check(client_hello->cipher_suites.data);
+ POSIX_ENSURE_REF(client_hello->cipher_suites.data);
/* Don't choose the cipher yet, read the extensions first */
uint8_t num_compression_methods = 0;
- GUARD(s2n_stuffer_read_uint8(in, &num_compression_methods));
- GUARD(s2n_stuffer_skip_read(in, num_compression_methods));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &num_compression_methods));
+ POSIX_GUARD(s2n_stuffer_skip_read(in, num_compression_methods));
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
-
- /* This is going to be our fallback if the client has no preference. */
- /* A TLS-compliant application MUST support key exchange with secp256r1 (NIST P-256) */
- /* and SHOULD support key exchange with X25519 [RFC7748]. */
- /* - https://tools.ietf.org/html/rfc8446#section-9.1 */
- conn->secure.server_ecc_evp_params.negotiated_curve = &s2n_ecc_curve_secp256r1;
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
+ POSIX_ENSURE_GT(ecc_pref->count, 0);
+
+ if (s2n_ecc_preferences_includes_curve(ecc_pref, TLS_EC_CURVE_SECP_256_R1)) {
+ /* This is going to be our fallback if the client has no preference. */
+ /* A TLS-compliant application MUST support key exchange with secp256r1 (NIST P-256) */
+ /* and SHOULD support key exchange with X25519 [RFC7748]. */
+ /* - https://tools.ietf.org/html/rfc8446#section-9.1 */
+ conn->kex_params.server_ecc_evp_params.negotiated_curve = &s2n_ecc_curve_secp256r1;
+ } else {
+ /* P-256 is the preferred fallback option. These prefs don't support it, so choose whatever curve is first. */
+ conn->kex_params.server_ecc_evp_params.negotiated_curve = ecc_pref->ecc_curves[0];
+ }
- GUARD(s2n_extension_list_parse(in, &conn->client_hello.extensions));
+ POSIX_GUARD(s2n_extension_list_parse(in, &conn->client_hello.extensions));
return S2N_SUCCESS;
}
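
A minimal, standalone sketch of the protocol-version arithmetic used in s2n_parse_client_hello() above and mirrored in s2n_client_hello_send() below: s2n folds the two wire bytes into major * 10 + minor, so the fixed ClientHello version {0x03, 0x03} becomes 33 (TLS 1.2). The helper names and the local constant are illustrative, not taken from s2n's headers.

#include <assert.h>
#include <stdint.h>

static uint8_t version_from_wire(const uint8_t wire[2])
{
    /* {0x03, 0x03} on the wire -> 33 internally */
    return (uint8_t)(wire[0] * 10 + wire[1]);
}

static void version_to_wire(uint8_t version, uint8_t wire[2])
{
    wire[0] = version / 10;
    wire[1] = version % 10;
}

int main(void)
{
    uint8_t wire[2] = { 0x03, 0x03 };
    assert(version_from_wire(wire) == 33);
    version_to_wire(version_from_wire(wire), wire);
    assert(wire[0] == 0x03 && wire[1] == 0x03);
    return 0;
}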
int s2n_process_client_hello(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
+
/* Client hello is parsed and config is finalized.
* Negotiate protocol version, cipher suite, ALPN, select a cert, etc. */
struct s2n_client_hello *client_hello = &conn->client_hello;
const struct s2n_security_policy *security_policy;
- GUARD(s2n_connection_get_security_policy(conn, &security_policy));
+ POSIX_GUARD(s2n_connection_get_security_policy(conn, &security_policy));
- /* Ensure that highest supported version is set correctly */
- if (!s2n_security_policy_supports_tls13(security_policy)) {
+ if (!s2n_connection_supports_tls13(conn) || !s2n_security_policy_supports_tls13(security_policy)) {
conn->server_protocol_version = MIN(conn->server_protocol_version, S2N_TLS12);
conn->actual_protocol_version = MIN(conn->server_protocol_version, S2N_TLS12);
}
@@ -252,11 +279,11 @@ int s2n_process_client_hello(struct s2n_connection *conn)
* To keep the version in client_hello intact for the extension retrieval APIs, process a copy instead.
*/
s2n_parsed_extensions_list copy_of_parsed_extensions = conn->client_hello.extensions;
- GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_CLIENT_HELLO, conn, &copy_of_parsed_extensions));
+ POSIX_GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_CLIENT_HELLO, conn, &copy_of_parsed_extensions));
/* After parsing extensions, select a curve and corresponding keyshare to use */
if (conn->actual_protocol_version >= S2N_TLS13) {
- GUARD(s2n_extensions_server_key_share_select(conn));
+ POSIX_GUARD(s2n_extensions_server_key_share_select(conn));
}
/* for pre TLS 1.3 connections, protocol selection is not done in supported_versions extensions, so do it here */
@@ -265,25 +292,19 @@ int s2n_process_client_hello(struct s2n_connection *conn)
}
if (conn->client_protocol_version < security_policy->minimum_protocol_version) {
- GUARD(s2n_queue_reader_unsupported_protocol_version_alert(conn));
- S2N_ERROR(S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
+ POSIX_GUARD(s2n_queue_reader_unsupported_protocol_version_alert(conn));
+ POSIX_BAIL(S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
}
- if (conn->config->quic_enabled) {
- ENSURE_POSIX(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
-
- /* In TLS1.3, legacy_session_id is only set to indicate middlebox compatability mode.
- * When running with QUIC, S2N does not support middlebox compatability mode.
- * https://tools.ietf.org/html/draft-ietf-quic-tls-32#section-8.4
- */
- ENSURE_POSIX(conn->session_id_len == 0, S2N_ERR_BAD_MESSAGE);
+ if (s2n_connection_is_quic_enabled(conn)) {
+ POSIX_ENSURE(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
}
/* Find potential certificate matches before we choose the cipher. */
- GUARD(s2n_conn_find_name_matching_certs(conn));
+ POSIX_GUARD(s2n_conn_find_name_matching_certs(conn));
/* Now choose the ciphers we have certs for. */
- GUARD(s2n_set_cipher_as_tls_server(conn, client_hello->cipher_suites.data, client_hello->cipher_suites.size / 2));
+ POSIX_GUARD(s2n_set_cipher_as_tls_server(conn, client_hello->cipher_suites.data, client_hello->cipher_suites.size / 2));
/* If we're using a PSK, we don't need to choose a signature algorithm or certificate,
* because no additional auth is required. */
@@ -292,58 +313,104 @@ int s2n_process_client_hello(struct s2n_connection *conn)
}
/* And set the signature and hash algorithm used for key exchange signatures */
- GUARD(s2n_choose_sig_scheme_from_peer_preference_list(conn,
+ POSIX_GUARD(s2n_choose_sig_scheme_from_peer_preference_list(conn,
&conn->handshake_params.client_sig_hash_algs,
- &conn->secure.conn_sig_scheme));
+ &conn->handshake_params.conn_sig_scheme));
/* And finally, set the certs specified by the final auth + sig_alg combo. */
- GUARD(s2n_select_certs_for_server_auth(conn, &conn->handshake_params.our_chain_and_key));
+ POSIX_GUARD(s2n_select_certs_for_server_auth(conn, &conn->handshake_params.our_chain_and_key));
return S2N_SUCCESS;
}
+static S2N_RESULT s2n_client_hello_process_cb_response(struct s2n_connection *conn, int rc)
+{
+ if (rc < 0) {
+ goto fail;
+ }
+ switch(conn->config->client_hello_cb_mode) {
+ case S2N_CLIENT_HELLO_CB_BLOCKING : {
+ if(rc) {
+ conn->server_name_used = 1;
+ }
+ return S2N_RESULT_OK;
+ }
+ case S2N_CLIENT_HELLO_CB_NONBLOCKING : {
+ if (conn->client_hello.callback_async_done) {
+ return S2N_RESULT_OK;
+ }
+ conn->client_hello.callback_async_blocked = 1;
+ RESULT_BAIL(S2N_ERR_ASYNC_BLOCKED);
+ }
+ }
+fail:
+ /* rc < 0 */
+ RESULT_GUARD_POSIX(s2n_queue_reader_handshake_failure_alert(conn));
+ RESULT_BAIL(S2N_ERR_CANCELLED);
+}
+
+bool s2n_client_hello_invoke_callback(struct s2n_connection *conn) {
+ /* Invoke only if the callback has not been called or if polling mode is enabled */
+ bool invoke = !conn->client_hello.callback_invoked || conn->config->client_hello_cb_enable_poll;
+ /*
+ * The callback should not be called if this client_hello is in response to a hello retry.
+ */
+ return invoke && !IS_HELLO_RETRY_HANDSHAKE(conn);
+}
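
A sketch of how an application might drive the non-blocking callback path handled by s2n_client_hello_process_cb_response() above. It assumes the public API declared in api/s2n.h of this version (s2n_config_set_client_hello_cb, s2n_config_set_client_hello_cb_mode, s2n_client_hello_cb_done); it is not a verbatim s2n example.

#include <s2n.h>
#include <stddef.h>

static int async_client_hello_cb(struct s2n_connection *conn, void *ctx)
{
    /* Hand the connection to a worker and return 0 immediately. Until
     * s2n_client_hello_cb_done() is called on this connection, the code above
     * makes s2n_negotiate() fail with the blocked error S2N_ERR_ASYNC_BLOCKED. */
    (void) conn;
    (void) ctx;
    return 0;
}

static int configure_async_client_hello(struct s2n_config *config)
{
    if (s2n_config_set_client_hello_cb(config, async_client_hello_cb, NULL) < 0) {
        return -1;
    }
    return s2n_config_set_client_hello_cb_mode(config, S2N_CLIENT_HELLO_CB_NONBLOCKING);
}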
+
int s2n_client_hello_recv(struct s2n_connection *conn)
{
- /* Parse client hello */
- GUARD(s2n_parse_client_hello(conn));
+ if (conn->config->client_hello_cb_enable_poll == 0) {
+ POSIX_ENSURE(conn->client_hello.callback_async_blocked == 0, S2N_ERR_ASYNC_BLOCKED);
+ }
- /* If the CLIENT_HELLO has already been parsed, then we should not call
- * the client_hello_cb a second time. */
if (conn->client_hello.parsed == 0) {
- /* Mark the collected client hello as available when parsing is done and before the client hello callback */
+ /* Parse client hello */
+ POSIX_GUARD(s2n_parse_client_hello(conn));
conn->client_hello.parsed = 1;
+ }
+ /* Call the client_hello_cb once unless polling is enabled. */
+ if (s2n_client_hello_invoke_callback(conn)) {
+ /* Mark the collected client hello as available when parsing is done and before the client hello callback */
+ conn->client_hello.callback_invoked = 1;
/* Call client_hello_cb if exists, letting application to modify s2n_connection or swap s2n_config */
if (conn->config->client_hello_cb) {
int rc = conn->config->client_hello_cb(conn, conn->config->client_hello_cb_ctx);
- if (rc < 0) {
- GUARD(s2n_queue_reader_handshake_failure_alert(conn));
- S2N_ERROR(S2N_ERR_CANCELLED);
- }
- if (rc) {
- conn->server_name_used = 1;
- }
+ POSIX_GUARD_RESULT(s2n_client_hello_process_cb_response(conn, rc));
}
}
if (conn->client_hello_version != S2N_SSLv2) {
- GUARD(s2n_process_client_hello(conn));
+ POSIX_GUARD(s2n_process_client_hello(conn));
}
return 0;
}
+S2N_RESULT s2n_cipher_suite_validate_available(struct s2n_connection *conn, struct s2n_cipher_suite *cipher)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(cipher);
+ RESULT_ENSURE_EQ(cipher->available, true);
+ RESULT_ENSURE_LTE(cipher->minimum_required_tls_version, conn->client_protocol_version);
+ if (s2n_connection_is_quic_enabled(conn)) {
+ RESULT_ENSURE_GTE(cipher->minimum_required_tls_version, S2N_TLS13);
+ }
+ return S2N_RESULT_OK;
+}
+
int s2n_client_hello_send(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
+
const struct s2n_security_policy *security_policy;
- GUARD(s2n_connection_get_security_policy(conn, &security_policy));
+ POSIX_GUARD(s2n_connection_get_security_policy(conn, &security_policy));
const struct s2n_cipher_preferences *cipher_preferences = security_policy->cipher_preferences;
- notnull_check(cipher_preferences);
+ POSIX_ENSURE_REF(cipher_preferences);
- /* Check whether cipher preference supports TLS 1.3. If it doesn't,
- our highest supported version is S2N_TLS12 */
- if (!s2n_security_policy_supports_tls13(security_policy)) {
+ if (!s2n_connection_supports_tls13(conn) || !s2n_security_policy_supports_tls13(security_policy)) {
conn->client_protocol_version = MIN(conn->client_protocol_version, S2N_TLS12);
conn->actual_protocol_version = MIN(conn->actual_protocol_version, S2N_TLS12);
}
@@ -353,61 +420,74 @@ int s2n_client_hello_send(struct s2n_connection *conn)
uint8_t client_protocol_version[S2N_TLS_PROTOCOL_VERSION_LEN] = {0};
struct s2n_blob b = {0};
- GUARD(s2n_blob_init(&b, conn->secure.client_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_blob_init(&b, conn->handshake_params.client_random, S2N_TLS_RANDOM_DATA_LEN));
/* Create the client random data */
- GUARD(s2n_stuffer_init(&client_random, &b));
+ POSIX_GUARD(s2n_stuffer_init(&client_random, &b));
struct s2n_blob r = {0};
- GUARD(s2n_blob_init(&r, s2n_stuffer_raw_write(&client_random, S2N_TLS_RANDOM_DATA_LEN), S2N_TLS_RANDOM_DATA_LEN));
- notnull_check(r.data);
- GUARD_AS_POSIX(s2n_get_public_random_data(&r));
+ POSIX_GUARD(s2n_blob_init(&r, s2n_stuffer_raw_write(&client_random, S2N_TLS_RANDOM_DATA_LEN), S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_ENSURE_REF(r.data);
+ POSIX_GUARD_RESULT(s2n_get_public_random_data(&r));
uint8_t reported_protocol_version = MIN(conn->client_protocol_version, S2N_TLS12);
client_protocol_version[0] = reported_protocol_version / 10;
client_protocol_version[1] = reported_protocol_version % 10;
conn->client_hello_version = reported_protocol_version;
- GUARD(s2n_stuffer_write_bytes(out, client_protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
- GUARD(s2n_stuffer_copy(&client_random, out, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, client_protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_copy(&client_random, out, S2N_TLS_RANDOM_DATA_LEN));
- GUARD_AS_POSIX(s2n_generate_client_session_id(conn));
- GUARD(s2n_stuffer_write_uint8(out, conn->session_id_len));
+ POSIX_GUARD_RESULT(s2n_generate_client_session_id(conn));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, conn->session_id_len));
if (conn->session_id_len > 0) {
- GUARD(s2n_stuffer_write_bytes(out, conn->session_id, conn->session_id_len));
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, conn->session_id, conn->session_id_len));
}
/* Reserve space for size of the list of available ciphers */
struct s2n_stuffer_reservation available_cipher_suites_size;
- GUARD(s2n_stuffer_reserve_uint16(out, &available_cipher_suites_size));
+ POSIX_GUARD(s2n_stuffer_reserve_uint16(out, &available_cipher_suites_size));
/* Now, write the IANA values of every available cipher suite in our list */
+ struct s2n_cipher_suite *cipher = NULL;
+ bool legacy_renegotiation_signal_required = false;
for (int i = 0; i < security_policy->cipher_preferences->count; i++ ) {
- if (cipher_preferences->suites[i]->available &&
- cipher_preferences->suites[i]->minimum_required_tls_version <= conn->client_protocol_version) {
- GUARD(s2n_stuffer_write_bytes(out, security_policy->cipher_preferences->suites[i]->iana_value, S2N_TLS_CIPHER_SUITE_LEN));
+ cipher = cipher_preferences->suites[i];
+ if (s2n_result_is_error(s2n_cipher_suite_validate_available(conn, cipher))) {
+ continue;
+ }
+ if (cipher->minimum_required_tls_version < S2N_TLS13) {
+ legacy_renegotiation_signal_required = true;
}
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, cipher->iana_value, S2N_TLS_CIPHER_SUITE_LEN));
}
- /* Lastly, write TLS_EMPTY_RENEGOTIATION_INFO_SCSV so that server knows it's an initial handshake (RFC5746 Section 3.4) */
- uint8_t renegotiation_info_scsv[S2N_TLS_CIPHER_SUITE_LEN] = { TLS_EMPTY_RENEGOTIATION_INFO_SCSV };
- GUARD(s2n_stuffer_write_bytes(out, renegotiation_info_scsv, S2N_TLS_CIPHER_SUITE_LEN));
+ if (legacy_renegotiation_signal_required) {
+ /* Lastly, write TLS_EMPTY_RENEGOTIATION_INFO_SCSV so that server knows it's an initial handshake (RFC5746 Section 3.4) */
+ uint8_t renegotiation_info_scsv[S2N_TLS_CIPHER_SUITE_LEN] = { TLS_EMPTY_RENEGOTIATION_INFO_SCSV };
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, renegotiation_info_scsv, S2N_TLS_CIPHER_SUITE_LEN));
+ }
/* Write size of the list of available ciphers */
- GUARD(s2n_stuffer_write_vector_size(&available_cipher_suites_size));
+ POSIX_GUARD(s2n_stuffer_write_vector_size(&available_cipher_suites_size));
/* Zero compression methods */
- GUARD(s2n_stuffer_write_uint8(out, 1));
- GUARD(s2n_stuffer_write_uint8(out, 0));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, 1));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, 0));
/* Write the extensions */
- GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_CLIENT_HELLO, conn, out));
+ POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_CLIENT_HELLO, conn, out));
/* Once the message is complete, finish calculating the PSK binders.
*
* The PSK binders require all the sizes in the ClientHello to be written correctly,
* including the extension size and extension list size, and therefore have
* to be calculated AFTER we finish writing the entire extension list. */
- GUARD_AS_POSIX(s2n_finish_psk_extension(conn));
+ POSIX_GUARD_RESULT(s2n_finish_psk_extension(conn));
+
+ /* If early data was not requested as part of the ClientHello, it never will be. */
+ if (conn->early_data_state == S2N_UNKNOWN_EARLY_DATA_STATE) {
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_NOT_REQUESTED));
+ }
return S2N_SUCCESS;
}
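
The legacy_renegotiation_signal_required branch above appends the TLS_EMPTY_RENEGOTIATION_INFO_SCSV pseudo cipher suite, which RFC 5746 section 3.3 defines as the value {0x00, 0xFF}. A standalone sketch of the same rule, with the hex value taken from the RFC rather than from s2n's headers; the caller is assumed to leave two spare bytes at the end of the list.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t maybe_append_scsv(uint8_t *suite_list, size_t len, bool offered_pre_tls13_suite)
{
    static const uint8_t scsv[2] = { 0x00, 0xFF }; /* TLS_EMPTY_RENEGOTIATION_INFO_SCSV */
    if (!offered_pre_tls13_suite) {
        /* A pure TLS 1.3 offer does not need the legacy renegotiation signal. */
        return len;
    }
    memcpy(suite_list + len, scsv, sizeof(scsv));
    return len + sizeof(scsv);
}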
@@ -419,54 +499,53 @@ int s2n_sslv2_client_hello_recv(struct s2n_connection *conn)
struct s2n_stuffer *in = &client_hello->raw_message;
const struct s2n_security_policy *security_policy;
- GUARD(s2n_connection_get_security_policy(conn, &security_policy));
+ POSIX_GUARD(s2n_connection_get_security_policy(conn, &security_policy));
if (conn->client_protocol_version < security_policy->minimum_protocol_version) {
- GUARD(s2n_queue_reader_unsupported_protocol_version_alert(conn));
- S2N_ERROR(S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
+ POSIX_GUARD(s2n_queue_reader_unsupported_protocol_version_alert(conn));
+ POSIX_BAIL(S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
}
conn->actual_protocol_version = MIN(conn->client_protocol_version, conn->server_protocol_version);
/* We start 5 bytes into the record */
uint16_t cipher_suites_length;
- GUARD(s2n_stuffer_read_uint16(in, &cipher_suites_length));
- ENSURE_POSIX(cipher_suites_length > 0, S2N_ERR_BAD_MESSAGE);
- ENSURE_POSIX(cipher_suites_length % S2N_SSLv2_CIPHER_SUITE_LEN == 0, S2N_ERR_BAD_MESSAGE);
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &cipher_suites_length));
+ POSIX_ENSURE(cipher_suites_length > 0, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(cipher_suites_length % S2N_SSLv2_CIPHER_SUITE_LEN == 0, S2N_ERR_BAD_MESSAGE);
uint16_t session_id_length;
- GUARD(s2n_stuffer_read_uint16(in, &session_id_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &session_id_length));
uint16_t challenge_length;
- GUARD(s2n_stuffer_read_uint16(in, &challenge_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &challenge_length));
S2N_ERROR_IF(challenge_length > S2N_TLS_RANDOM_DATA_LEN, S2N_ERR_BAD_MESSAGE);
client_hello->cipher_suites.size = cipher_suites_length;
client_hello->cipher_suites.data = s2n_stuffer_raw_read(in, cipher_suites_length);
- notnull_check(client_hello->cipher_suites.data);
+ POSIX_ENSURE_REF(client_hello->cipher_suites.data);
/* Find potential certificate matches before we choose the cipher. */
- GUARD(s2n_conn_find_name_matching_certs(conn));
+ POSIX_GUARD(s2n_conn_find_name_matching_certs(conn));
- GUARD(s2n_set_cipher_as_sslv2_server(conn, client_hello->cipher_suites.data, client_hello->cipher_suites.size / S2N_SSLv2_CIPHER_SUITE_LEN));
- GUARD(s2n_choose_default_sig_scheme(conn, &conn->secure.conn_sig_scheme));
- GUARD(s2n_select_certs_for_server_auth(conn, &conn->handshake_params.our_chain_and_key));
+ POSIX_GUARD(s2n_set_cipher_as_sslv2_server(conn, client_hello->cipher_suites.data, client_hello->cipher_suites.size / S2N_SSLv2_CIPHER_SUITE_LEN));
+ POSIX_GUARD(s2n_choose_default_sig_scheme(conn, &conn->handshake_params.conn_sig_scheme, S2N_SERVER));
+ POSIX_GUARD(s2n_select_certs_for_server_auth(conn, &conn->handshake_params.our_chain_and_key));
S2N_ERROR_IF(session_id_length > s2n_stuffer_data_available(in), S2N_ERR_BAD_MESSAGE);
+ POSIX_GUARD(s2n_blob_init(&client_hello->session_id, s2n_stuffer_raw_read(in, session_id_length), session_id_length));
if (session_id_length > 0 && session_id_length <= S2N_TLS_SESSION_ID_MAX_LEN) {
- GUARD(s2n_stuffer_read_bytes(in, conn->session_id, session_id_length));
+ POSIX_CHECKED_MEMCPY(conn->session_id, client_hello->session_id.data, session_id_length);
conn->session_id_len = (uint8_t) session_id_length;
- } else {
- GUARD(s2n_stuffer_skip_read(in, session_id_length));
}
struct s2n_blob b = {0};
- GUARD(s2n_blob_init(&b, conn->secure.client_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_blob_init(&b, conn->handshake_params.client_random, S2N_TLS_RANDOM_DATA_LEN));
b.data += S2N_TLS_RANDOM_DATA_LEN - challenge_length;
b.size -= S2N_TLS_RANDOM_DATA_LEN - challenge_length;
- GUARD(s2n_stuffer_read(in, &b));
+ POSIX_GUARD(s2n_stuffer_read(in, &b));
return 0;
}
@@ -474,15 +553,15 @@ int s2n_sslv2_client_hello_recv(struct s2n_connection *conn)
static int s2n_client_hello_get_parsed_extension(s2n_tls_extension_type extension_type,
s2n_parsed_extensions_list *parsed_extension_list, s2n_parsed_extension **parsed_extension)
{
- notnull_check(parsed_extension_list);
- notnull_check(parsed_extension);
+ POSIX_ENSURE_REF(parsed_extension_list);
+ POSIX_ENSURE_REF(parsed_extension);
s2n_extension_type_id extension_type_id;
- GUARD(s2n_extension_supported_iana_value_to_id(extension_type, &extension_type_id));
+ POSIX_GUARD(s2n_extension_supported_iana_value_to_id(extension_type, &extension_type_id));
s2n_parsed_extension *found_parsed_extension = &parsed_extension_list->parsed_extensions[extension_type_id];
- notnull_check(found_parsed_extension->extension.data);
- ENSURE_POSIX(found_parsed_extension->extension_type == extension_type, S2N_ERR_INVALID_PARSED_EXTENSIONS);
+ POSIX_ENSURE_REF(found_parsed_extension->extension.data);
+ POSIX_ENSURE(found_parsed_extension->extension_type == extension_type, S2N_ERR_INVALID_PARSED_EXTENSIONS);
*parsed_extension = found_parsed_extension;
return S2N_SUCCESS;
@@ -490,7 +569,7 @@ static int s2n_client_hello_get_parsed_extension(s2n_tls_extension_type extensio
ssize_t s2n_client_hello_get_extension_length(struct s2n_client_hello *ch, s2n_tls_extension_type extension_type)
{
- notnull_check(ch);
+ POSIX_ENSURE_REF(ch);
s2n_parsed_extension *parsed_extension = NULL;
if (s2n_client_hello_get_parsed_extension(extension_type, &ch->extensions, &parsed_extension) != S2N_SUCCESS) {
@@ -502,8 +581,8 @@ ssize_t s2n_client_hello_get_extension_length(struct s2n_client_hello *ch, s2n_t
ssize_t s2n_client_hello_get_extension_by_id(struct s2n_client_hello *ch, s2n_tls_extension_type extension_type, uint8_t *out, uint32_t max_length)
{
- notnull_check(ch);
- notnull_check(out);
+ POSIX_ENSURE_REF(ch);
+ POSIX_ENSURE_REF(out);
s2n_parsed_extension *parsed_extension = NULL;
if (s2n_client_hello_get_parsed_extension(extension_type, &ch->extensions, &parsed_extension) != S2N_SUCCESS) {
@@ -511,6 +590,81 @@ ssize_t s2n_client_hello_get_extension_by_id(struct s2n_client_hello *ch, s2n_tl
}
uint32_t len = min_size(&parsed_extension->extension, max_length);
- memcpy_check(out, parsed_extension->extension.data, len);
+ POSIX_CHECKED_MEMCPY(out, parsed_extension->extension.data, len);
return len;
}
+
+int s2n_client_hello_get_session_id_length(struct s2n_client_hello *ch, uint32_t *out_length)
+{
+ POSIX_ENSURE_REF(ch);
+ POSIX_ENSURE_REF(out_length);
+ *out_length = ch->session_id.size;
+ return S2N_SUCCESS;
+}
+
+int s2n_client_hello_get_session_id(struct s2n_client_hello *ch, uint8_t *out, uint32_t *out_length, uint32_t max_length)
+{
+ POSIX_ENSURE_REF(ch);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(out_length);
+
+ uint32_t len = min_size(&ch->session_id, max_length);
+ POSIX_CHECKED_MEMCPY(out, ch->session_id.data, len);
+ *out_length = len;
+
+ return S2N_SUCCESS;
+}
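
A sketch of how a client hello callback could use the two session-id accessors added above, together with s2n_connection_get_client_hello() from the public API. The buffer size of 32 is the TLS maximum for legacy session ids; the function name is illustrative.

#include <s2n.h>
#include <stdint.h>
#include <stdio.h>

static int log_session_id(struct s2n_connection *conn, void *ctx)
{
    (void) ctx;
    struct s2n_client_hello *ch = s2n_connection_get_client_hello(conn);
    if (ch == NULL) {
        return -1;
    }

    uint32_t offered = 0;
    if (s2n_client_hello_get_session_id_length(ch, &offered) < 0) {
        return -1;
    }

    uint8_t session_id[32] = { 0 };
    uint32_t copied = 0;
    if (s2n_client_hello_get_session_id(ch, session_id, &copied, sizeof(session_id)) < 0) {
        return -1;
    }

    printf("client offered a %u byte session id, copied %u bytes\n", offered, copied);
    return 0;
}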
+
+static S2N_RESULT s2n_client_hello_get_raw_extension(uint16_t extension_iana,
+ struct s2n_blob *raw_extensions, struct s2n_blob *extension)
+{
+ RESULT_ENSURE_REF(raw_extensions);
+ RESULT_ENSURE_REF(extension);
+
+ *extension = (struct s2n_blob) { 0 };
+
+ struct s2n_stuffer raw_extensions_stuffer = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&raw_extensions_stuffer, raw_extensions));
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_write(&raw_extensions_stuffer, raw_extensions->size));
+
+ while (s2n_stuffer_data_available(&raw_extensions_stuffer) > 0) {
+ uint16_t extension_type = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint16(&raw_extensions_stuffer, &extension_type));
+
+ uint16_t extension_size = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint16(&raw_extensions_stuffer, &extension_size));
+
+ uint8_t *extension_data = s2n_stuffer_raw_read(&raw_extensions_stuffer, extension_size);
+ RESULT_ENSURE_REF(extension_data);
+
+ if (extension_iana == extension_type) {
+ RESULT_GUARD_POSIX(s2n_blob_init(extension, extension_data, extension_size));
+ return S2N_RESULT_OK;
+ }
+ }
+ return S2N_RESULT_OK;
+}
+
+int s2n_client_hello_has_extension(struct s2n_client_hello *ch, uint16_t extension_iana, bool *exists)
+{
+ POSIX_ENSURE_REF(ch);
+ POSIX_ENSURE_REF(exists);
+
+ *exists = false;
+
+ s2n_extension_type_id extension_type_id = s2n_unsupported_extension;
+ if (s2n_extension_supported_iana_value_to_id(extension_iana, &extension_type_id) == S2N_SUCCESS) {
+ s2n_parsed_extension *parsed_extension = NULL;
+ if (s2n_client_hello_get_parsed_extension(extension_iana, &ch->extensions, &parsed_extension) == S2N_SUCCESS) {
+ *exists = true;
+ }
+ return S2N_SUCCESS;
+ }
+
+ struct s2n_blob extension = { 0 };
+ POSIX_GUARD_RESULT(s2n_client_hello_get_raw_extension(extension_iana, &ch->extensions.raw, &extension));
+ if (extension.data != NULL) {
+ *exists = true;
+ }
+ return S2N_SUCCESS;
+}
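
s2n_client_hello_get_raw_extension() above walks the raw extension list in its wire format: a 2-byte IANA type, a 2-byte length, then that many payload bytes, repeated. A standalone scanner over the same format, with no s2n types, to make the framing explicit:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool raw_extensions_contain(const uint8_t *buf, size_t len, uint16_t wanted)
{
    size_t i = 0;
    while (i + 4 <= len) {
        uint16_t type = (uint16_t) ((buf[i] << 8) | buf[i + 1]);
        uint16_t size = (uint16_t) ((buf[i + 2] << 8) | buf[i + 3]);
        i += 4;
        if (size > len - i) {
            return false; /* malformed: declared length overruns the buffer */
        }
        if (type == wanted) {
            return true;
        }
        i += size;
    }
    return false;
}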
diff --git a/contrib/restricted/aws/s2n/tls/s2n_client_hello.h b/contrib/restricted/aws/s2n/tls/s2n_client_hello.h
index 59eb0b12c7..d319bbd606 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_client_hello.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_client_hello.h
@@ -16,7 +16,7 @@
#pragma once
#include <stdint.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "stuffer/s2n_stuffer.h"
#include "tls/extensions/s2n_extension_list.h"
@@ -31,7 +31,18 @@ struct s2n_client_hello {
s2n_parsed_extensions_list extensions;
struct s2n_blob cipher_suites;
-
+ struct s2n_blob session_id;
+
+ unsigned int callback_invoked:1;
+ unsigned int callback_async_blocked:1;
+ unsigned int callback_async_done:1;
+ /*
+ * Marks if the client hello has been parsed.
+ *
+ * While a client_hello is only parsed once, it is possible to parse
+ * two different client_hellos during a single handshake if the server
+ * issues a hello retry.
+ */
unsigned int parsed:1;
};
diff --git a/contrib/restricted/aws/s2n/tls/s2n_client_hello_request.c b/contrib/restricted/aws/s2n/tls/s2n_client_hello_request.c
new file mode 100644
index 0000000000..6444c87f5d
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_client_hello_request.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "api/s2n.h"
+
+#include "tls/s2n_connection.h"
+#include "utils/s2n_safety.h"
+
+int s2n_client_hello_request_recv(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE(conn->actual_protocol_version < S2N_TLS13, S2N_ERR_BAD_MESSAGE);
+
+ /*
+ *= https://tools.ietf.org/rfc/rfc5246#section-7.4.1.1
+ *# The HelloRequest message MAY be sent by the server at any time.
+ */
+ POSIX_ENSURE(conn->mode == S2N_CLIENT, S2N_ERR_BAD_MESSAGE);
+
+ /*
+ *= https://tools.ietf.org/rfc/rfc5246#section-7.4.1.1
+ *# This message will be ignored by the client if the client is
+ *# currently negotiating a session. This message MAY be ignored by
+ *# the client if it does not wish to renegotiate a session, or the
+ *# client may, if it wishes, respond with a no_renegotiation alert.
+ */
+ return S2N_SUCCESS;
+}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_client_key_exchange.c b/contrib/restricted/aws/s2n/tls/s2n_client_key_exchange.c
index 1428e177b2..7b669e1ba6 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_client_key_exchange.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_client_key_exchange.c
@@ -14,7 +14,7 @@
*/
#include <sys/param.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
@@ -24,6 +24,7 @@
#include "tls/s2n_cipher_suites.h"
#include "tls/s2n_connection.h"
#include "tls/s2n_kex.h"
+#include "tls/s2n_key_log.h"
#include "tls/s2n_resume.h"
#include "stuffer/s2n_stuffer.h"
@@ -45,35 +46,35 @@ static int s2n_rsa_client_key_recv_complete(struct s2n_connection *conn, bool rs
static int s2n_hybrid_client_action(struct s2n_connection *conn, struct s2n_blob *combined_shared_key,
s2n_kex_client_key_method kex_method, uint32_t *cursor, s2n_stuffer_action stuffer_action)
{
- notnull_check(kex_method);
- notnull_check(stuffer_action);
+ POSIX_ENSURE_REF(kex_method);
+ POSIX_ENSURE_REF(stuffer_action);
struct s2n_stuffer *io = &conn->handshake.io;
const struct s2n_kex *hybrid_kex_0 = conn->secure.cipher_suite->key_exchange_alg->hybrid[0];
const struct s2n_kex *hybrid_kex_1 = conn->secure.cipher_suite->key_exchange_alg->hybrid[1];
/* Keep a copy to the start of the entire hybrid client key exchange message for the hybrid PRF */
- struct s2n_blob *client_key_exchange_message = &conn->secure.client_key_exchange_message;
+ struct s2n_blob *client_key_exchange_message = &conn->kex_params.client_key_exchange_message;
client_key_exchange_message->data = stuffer_action(io, 0);
- notnull_check(client_key_exchange_message->data);
+ POSIX_ENSURE_REF(client_key_exchange_message->data);
const uint32_t start_cursor = *cursor;
DEFER_CLEANUP(struct s2n_blob shared_key_0 = {0}, s2n_free);
- GUARD_AS_POSIX(kex_method(hybrid_kex_0, conn, &shared_key_0));
+ POSIX_GUARD_RESULT(kex_method(hybrid_kex_0, conn, &shared_key_0));
- struct s2n_blob *shared_key_1 = &(conn->secure.kem_params.shared_secret);
- GUARD_AS_POSIX(kex_method(hybrid_kex_1, conn, shared_key_1));
+ struct s2n_blob *shared_key_1 = &(conn->kex_params.kem_params.shared_secret);
+ POSIX_GUARD_RESULT(kex_method(hybrid_kex_1, conn, shared_key_1));
const uint32_t end_cursor = *cursor;
- gte_check(end_cursor, start_cursor);
+ POSIX_ENSURE_GTE(end_cursor, start_cursor);
client_key_exchange_message->size = end_cursor - start_cursor;
- GUARD(s2n_alloc(combined_shared_key, shared_key_0.size + shared_key_1->size));
+ POSIX_GUARD(s2n_alloc(combined_shared_key, shared_key_0.size + shared_key_1->size));
struct s2n_stuffer stuffer_combiner = {0};
- GUARD(s2n_stuffer_init(&stuffer_combiner, combined_shared_key));
- GUARD(s2n_stuffer_write(&stuffer_combiner, &shared_key_0));
- GUARD(s2n_stuffer_write(&stuffer_combiner, shared_key_1));
+ POSIX_GUARD(s2n_stuffer_init(&stuffer_combiner, combined_shared_key));
+ POSIX_GUARD(s2n_stuffer_write(&stuffer_combiner, &shared_key_0));
+ POSIX_GUARD(s2n_stuffer_write(&stuffer_combiner, shared_key_1));
- GUARD(s2n_kem_free(&conn->secure.kem_params));
+ POSIX_GUARD(s2n_kem_free(&conn->kex_params.kem_params));
return 0;
}
@@ -81,26 +82,26 @@ static int s2n_hybrid_client_action(struct s2n_connection *conn, struct s2n_blob
static int s2n_calculate_keys(struct s2n_connection *conn, struct s2n_blob *shared_key)
{
/* Turn the pre-master secret into a master secret */
- GUARD_AS_POSIX(s2n_kex_tls_prf(conn->secure.cipher_suite->key_exchange_alg, conn, shared_key));
- /* Erase the pre-master secret */
- GUARD(s2n_blob_zero(shared_key));
- if (shared_key->allocated) {
- GUARD(s2n_free(shared_key));
- }
+ POSIX_GUARD_RESULT(s2n_kex_tls_prf(conn->secure.cipher_suite->key_exchange_alg, conn, shared_key));
+
/* Expand the keys */
- GUARD(s2n_prf_key_expansion(conn));
- /* Save the master secret in the cache */
+ POSIX_GUARD(s2n_prf_key_expansion(conn));
+ /* Save the master secret in the cache.
+ * Failing to cache the session should not affect the current handshake.
+ */
if (s2n_allowed_to_cache_connection(conn)) {
- GUARD(s2n_store_to_cache(conn));
+ s2n_result_ignore(s2n_store_to_cache(conn));
}
+ /* log the secret, if needed */
+ s2n_result_ignore(s2n_key_log_tls12_secret(conn));
return 0;
}
int s2n_rsa_client_key_recv(struct s2n_connection *conn, struct s2n_blob *shared_key)
{
/* Set shared_key before async guard to pass the proper shared_key to the caller upon async completion */
- notnull_check(shared_key);
- shared_key->data = conn->secure.rsa_premaster_secret;
+ POSIX_ENSURE_REF(shared_key);
+ shared_key->data = conn->secrets.tls12.rsa_premaster_secret;
shared_key->size = S2N_TLS_SECRET_LEN;
S2N_ASYNC_PKEY_GUARD(conn);
@@ -112,7 +113,7 @@ int s2n_rsa_client_key_recv(struct s2n_connection *conn, struct s2n_blob *shared
if (conn->actual_protocol_version == S2N_SSLv3) {
length = s2n_stuffer_data_available(in);
} else {
- GUARD(s2n_stuffer_read_uint16(in, &length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &length));
}
S2N_ERROR_IF(length > s2n_stuffer_data_available(in), S2N_ERR_BAD_MESSAGE);
@@ -127,13 +128,13 @@ int s2n_rsa_client_key_recv(struct s2n_connection *conn, struct s2n_blob *shared
/* Decrypt the pre-master secret */
struct s2n_blob encrypted = {.size = length, .data = s2n_stuffer_raw_read(in, length)};
- notnull_check(encrypted.data);
- gt_check(encrypted.size, 0);
+ POSIX_ENSURE_REF(encrypted.data);
+ POSIX_ENSURE_GT(encrypted.size, 0);
/* First: use a random pre-master secret */
- GUARD_AS_POSIX(s2n_get_private_random_data(shared_key));
- conn->secure.rsa_premaster_secret[0] = client_hello_protocol_version[0];
- conn->secure.rsa_premaster_secret[1] = client_hello_protocol_version[1];
+ POSIX_GUARD_RESULT(s2n_get_private_random_data(shared_key));
+ conn->secrets.tls12.rsa_premaster_secret[0] = client_hello_protocol_version[0];
+ conn->secrets.tls12.rsa_premaster_secret[1] = client_hello_protocol_version[1];
S2N_ASYNC_PKEY_DECRYPT(conn, &encrypted, shared_key, s2n_rsa_client_key_recv_complete);
}
@@ -143,9 +144,9 @@ int s2n_rsa_client_key_recv_complete(struct s2n_connection *conn, bool rsa_faile
S2N_ERROR_IF(decrypted->size != S2N_TLS_SECRET_LEN, S2N_ERR_SIZE_MISMATCH);
/* Avoid copying the same buffer for the case where async pkey is not used */
- if (conn->secure.rsa_premaster_secret != decrypted->data) {
+ if (conn->secrets.tls12.rsa_premaster_secret != decrypted->data) {
/* Copy (maybe) decrypted data into shared key */
- memcpy_check(conn->secure.rsa_premaster_secret, decrypted->data, S2N_TLS_SECRET_LEN);
+ POSIX_CHECKED_MEMCPY(conn->secrets.tls12.rsa_premaster_secret, decrypted->data, S2N_TLS_SECRET_LEN);
}
/* Get client hello protocol version for comparison with decrypted data */
@@ -158,7 +159,7 @@ int s2n_rsa_client_key_recv_complete(struct s2n_connection *conn, bool rsa_faile
/* Set rsa_failed to true, if it isn't already, if the protocol version isn't what we expect */
conn->handshake.rsa_failed |= !s2n_constant_time_equals(client_hello_protocol_version,
- conn->secure.rsa_premaster_secret, S2N_TLS_PROTOCOL_VERSION_LEN);
+ conn->secrets.tls12.rsa_premaster_secret, S2N_TLS_PROTOCOL_VERSION_LEN);
return 0;
}
@@ -168,9 +169,9 @@ int s2n_dhe_client_key_recv(struct s2n_connection *conn, struct s2n_blob *shared
struct s2n_stuffer *in = &conn->handshake.io;
/* Get the shared key */
- GUARD(s2n_dh_compute_shared_secret_as_server(&conn->secure.server_dh_params, in, shared_key));
+ POSIX_GUARD(s2n_dh_compute_shared_secret_as_server(&conn->kex_params.server_dh_params, in, shared_key));
/* We don't need the server params any more */
- GUARD(s2n_dh_params_free(&conn->secure.server_dh_params));
+ POSIX_GUARD(s2n_dh_params_free(&conn->kex_params.server_dh_params));
return 0;
}
@@ -179,16 +180,16 @@ int s2n_ecdhe_client_key_recv(struct s2n_connection *conn, struct s2n_blob *shar
struct s2n_stuffer *in = &conn->handshake.io;
/* Get the shared key */
- GUARD(s2n_ecc_evp_compute_shared_secret_as_server(&conn->secure.server_ecc_evp_params, in, shared_key));
+ POSIX_GUARD(s2n_ecc_evp_compute_shared_secret_as_server(&conn->kex_params.server_ecc_evp_params, in, shared_key));
/* We don't need the server params any more */
- GUARD(s2n_ecc_evp_params_free(&conn->secure.server_ecc_evp_params));
+ POSIX_GUARD(s2n_ecc_evp_params_free(&conn->kex_params.server_ecc_evp_params));
return 0;
}
int s2n_kem_client_key_recv(struct s2n_connection *conn, struct s2n_blob *shared_key)
{
/* s2n_kem_recv_ciphertext() writes the KEM shared secret directly to
- * conn->secure.kem_params. However, the calling function
+ * conn->kex_params.kem_params. However, the calling function
* likely expects *shared_key to point to the shared secret. We
* can't reassign *shared_key to point to kem_params.shared_secret,
* because that would require us to take struct s2n_blob **shared_key
@@ -197,10 +198,10 @@ int s2n_kem_client_key_recv(struct s2n_connection *conn, struct s2n_blob *shared
*
* So, we assert that the caller already has *shared_key pointing
* to kem_params.shared_secret. */
- notnull_check(shared_key);
- S2N_ERROR_IF(shared_key != &(conn->secure.kem_params.shared_secret), S2N_ERR_SAFETY);
+ POSIX_ENSURE_REF(shared_key);
+ S2N_ERROR_IF(shared_key != &(conn->kex_params.kem_params.shared_secret), S2N_ERR_SAFETY);
- GUARD(s2n_kem_recv_ciphertext(&(conn->handshake.io), &(conn->secure.kem_params)));
+ POSIX_GUARD(s2n_kem_recv_ciphertext(&(conn->handshake.io), &(conn->kex_params.kem_params)));
return 0;
}
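
The comment above describes an aliasing contract: the callee cannot reseat the caller's *shared_key, so it instead checks that the caller already passed the internal kem_params.shared_secret blob. The same pattern, reduced to plain C with hypothetical type names:

#include <stdint.h>

struct blob { uint8_t *data; uint32_t size; };
struct kem_state { struct blob shared_secret; };

static int recv_into_internal_secret(struct kem_state *kem, struct blob *shared_key)
{
    /* Mirrors the S2N_ERR_SAFETY check above: refuse any out-pointer that is
     * not the internal blob, since the secret is written there directly. */
    if (shared_key != &kem->shared_secret) {
        return -1;
    }
    /* ... decapsulate and fill kem->shared_secret ... */
    return 0;
}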
@@ -214,31 +215,30 @@ int s2n_hybrid_client_key_recv(struct s2n_connection *conn, struct s2n_blob *com
int s2n_client_key_recv(struct s2n_connection *conn)
{
const struct s2n_kex *key_exchange = conn->secure.cipher_suite->key_exchange_alg;
- struct s2n_blob shared_key = {0};
-
- GUARD_AS_POSIX(s2n_kex_client_key_recv(key_exchange, conn, &shared_key));
+ DEFER_CLEANUP(struct s2n_blob shared_key = { 0 }, s2n_blob_zeroize_free);
+ POSIX_GUARD_RESULT(s2n_kex_client_key_recv(key_exchange, conn, &shared_key));
- GUARD(s2n_calculate_keys(conn, &shared_key));
+ POSIX_GUARD(s2n_calculate_keys(conn, &shared_key));
return 0;
}
int s2n_dhe_client_key_send(struct s2n_connection *conn, struct s2n_blob *shared_key)
{
struct s2n_stuffer *out = &conn->handshake.io;
- GUARD(s2n_dh_compute_shared_secret_as_client(&conn->secure.server_dh_params, out, shared_key));
+ POSIX_GUARD(s2n_dh_compute_shared_secret_as_client(&conn->kex_params.server_dh_params, out, shared_key));
/* We don't need the server params any more */
- GUARD(s2n_dh_params_free(&conn->secure.server_dh_params));
+ POSIX_GUARD(s2n_dh_params_free(&conn->kex_params.server_dh_params));
return 0;
}
int s2n_ecdhe_client_key_send(struct s2n_connection *conn, struct s2n_blob *shared_key)
{
struct s2n_stuffer *out = &conn->handshake.io;
- GUARD(s2n_ecc_evp_compute_shared_secret_as_client(&conn->secure.server_ecc_evp_params, out, shared_key));
+ POSIX_GUARD(s2n_ecc_evp_compute_shared_secret_as_client(&conn->kex_params.server_ecc_evp_params, out, shared_key));
/* We don't need the server params any more */
- GUARD(s2n_ecc_evp_params_free(&conn->secure.server_ecc_evp_params));
+ POSIX_GUARD(s2n_ecc_evp_params_free(&conn->kex_params.server_ecc_evp_params));
return 0;
}
@@ -249,42 +249,42 @@ int s2n_rsa_client_key_send(struct s2n_connection *conn, struct s2n_blob *shared
client_hello_protocol_version[0] = legacy_client_hello_protocol_version / 10;
client_hello_protocol_version[1] = legacy_client_hello_protocol_version % 10;
- shared_key->data = conn->secure.rsa_premaster_secret;
+ shared_key->data = conn->secrets.tls12.rsa_premaster_secret;
shared_key->size = S2N_TLS_SECRET_LEN;
- GUARD_AS_POSIX(s2n_get_private_random_data(shared_key));
+ POSIX_GUARD_RESULT(s2n_get_private_random_data(shared_key));
/* Over-write the first two bytes with the client hello version, per RFC2246/RFC4346/RFC5246 7.4.7.1.
* The latest version supported by the client (as seen from the client hello version) is <= TLS1.2
* for all clients, because TLS 1.3 clients freeze the TLS1.2 legacy version in the client hello.
*/
- memcpy_check(conn->secure.rsa_premaster_secret, client_hello_protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN);
+ POSIX_CHECKED_MEMCPY(conn->secrets.tls12.rsa_premaster_secret, client_hello_protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN);
uint32_t encrypted_size = 0;
- GUARD_AS_POSIX(s2n_pkey_size(&conn->secure.server_public_key, &encrypted_size));
+ POSIX_GUARD_RESULT(s2n_pkey_size(&conn->handshake_params.server_public_key, &encrypted_size));
S2N_ERROR_IF(encrypted_size > 0xffff, S2N_ERR_SIZE_MISMATCH);
if (conn->actual_protocol_version > S2N_SSLv3) {
- GUARD(s2n_stuffer_write_uint16(&conn->handshake.io, encrypted_size));
+ POSIX_GUARD(s2n_stuffer_write_uint16(&conn->handshake.io, encrypted_size));
}
struct s2n_blob encrypted = {0};
encrypted.data = s2n_stuffer_raw_write(&conn->handshake.io, encrypted_size);
encrypted.size = encrypted_size;
- notnull_check(encrypted.data);
+ POSIX_ENSURE_REF(encrypted.data);
/* Encrypt the secret and send it on */
- GUARD(s2n_pkey_encrypt(&conn->secure.server_public_key, shared_key, &encrypted));
+ POSIX_GUARD(s2n_pkey_encrypt(&conn->handshake_params.server_public_key, shared_key, &encrypted));
/* We don't need the key any more, so free it */
- GUARD(s2n_pkey_free(&conn->secure.server_public_key));
+ POSIX_GUARD(s2n_pkey_free(&conn->handshake_params.server_public_key));
return 0;
}
int s2n_kem_client_key_send(struct s2n_connection *conn, struct s2n_blob *shared_key)
{
/* s2n_kem_send_ciphertext() writes the KEM shared secret directly to
- * conn->secure.kem_params. However, the calling function
+ * conn->kex_params.kem_params. However, the calling function
* likely expects *shared_key to point to the shared secret. We
* can't reassign *shared_key to point to kem_params.shared_secret,
* because that would require us to take struct s2n_blob **shared_key
@@ -293,10 +293,10 @@ int s2n_kem_client_key_send(struct s2n_connection *conn, struct s2n_blob *shared
*
* So, we assert that the caller already has *shared_key pointing
* to kem_params.shared_secret. */
- notnull_check(shared_key);
- S2N_ERROR_IF(shared_key != &(conn->secure.kem_params.shared_secret), S2N_ERR_SAFETY);
+ POSIX_ENSURE_REF(shared_key);
+ S2N_ERROR_IF(shared_key != &(conn->kex_params.kem_params.shared_secret), S2N_ERR_SAFETY);
- GUARD(s2n_kem_send_ciphertext(&(conn->handshake.io), &(conn->secure.kem_params)));
+ POSIX_GUARD(s2n_kem_send_ciphertext(&(conn->handshake.io), &(conn->kex_params.kem_params)));
return 0;
}
@@ -310,10 +310,10 @@ int s2n_hybrid_client_key_send(struct s2n_connection *conn, struct s2n_blob *com
int s2n_client_key_send(struct s2n_connection *conn)
{
const struct s2n_kex *key_exchange = conn->secure.cipher_suite->key_exchange_alg;
- struct s2n_blob shared_key = {0};
+ DEFER_CLEANUP(struct s2n_blob shared_key = { 0 }, s2n_blob_zeroize_free);
- GUARD_AS_POSIX(s2n_kex_client_key_send(key_exchange, conn, &shared_key));
+ POSIX_GUARD_RESULT(s2n_kex_client_key_send(key_exchange, conn, &shared_key));
- GUARD(s2n_calculate_keys(conn, &shared_key));
+ POSIX_GUARD(s2n_calculate_keys(conn, &shared_key));
return 0;
}
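
Both key-exchange entry points above now wrap the shared secret in DEFER_CLEANUP(..., s2n_blob_zeroize_free), so the pre-master secret is wiped and freed on every return path instead of by hand. A generic sketch of that pattern using the GCC/Clang cleanup attribute; s2n defines its own DEFER_CLEANUP macro in its safety utilities, and a production wipe should use something the compiler cannot elide (e.g. explicit_bzero).

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct secret { uint8_t *data; size_t size; };

static void secret_zeroize_free(struct secret *s)
{
    if (s->data != NULL) {
        memset(s->data, 0, s->size); /* illustrative; prefer a non-elidable wipe */
        free(s->data);
        s->data = NULL;
        s->size = 0;
    }
}

#define DEFER_ZEROIZE __attribute__((cleanup(secret_zeroize_free)))

static int derive_keys_example(void)
{
    DEFER_ZEROIZE struct secret shared_key = { 0 };
    shared_key.data = malloc(48);
    if (shared_key.data == NULL) {
        return -1;
    }
    shared_key.size = 48;
    /* ... run the PRF over shared_key ... */
    return 0; /* shared_key is zeroized and freed on every return path */
}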
diff --git a/contrib/restricted/aws/s2n/tls/s2n_config.c b/contrib/restricted/aws/s2n/tls/s2n_config.c
index db3a8819bc..7c455bb642 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_config.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_config.c
@@ -22,6 +22,7 @@
#include "crypto/s2n_fips.h"
#include "tls/s2n_cipher_preferences.h"
+#include "tls/s2n_internal.h"
#include "tls/s2n_security_policies.h"
#include "tls/s2n_tls13.h"
#include "utils/s2n_safety.h"
@@ -41,7 +42,7 @@ static int monotonic_clock(void *data, uint64_t *nanoseconds)
{
struct timespec current_time = {0};
- GUARD(clock_gettime(S2N_CLOCK_HW, &current_time));
+ POSIX_GUARD(clock_gettime(S2N_CLOCK_HW, &current_time));
*nanoseconds = (uint64_t)current_time.tv_sec * 1000000000ull;
*nanoseconds += current_time.tv_nsec;
@@ -53,7 +54,7 @@ static int wall_clock(void *data, uint64_t *nanoseconds)
{
struct timespec current_time = {0};
- GUARD(clock_gettime(S2N_CLOCK_SYS, &current_time));
+ POSIX_GUARD(clock_gettime(S2N_CLOCK_SYS, &current_time));
*nanoseconds = (uint64_t)current_time.tv_sec * 1000000000ull;
*nanoseconds += current_time.tv_nsec;
@@ -67,79 +68,54 @@ static struct s2n_config s2n_default_tls13_config = {0};
static int s2n_config_setup_default(struct s2n_config *config)
{
- GUARD(s2n_config_set_cipher_preferences(config, "default"));
+ POSIX_GUARD(s2n_config_set_cipher_preferences(config, "default"));
return S2N_SUCCESS;
}
static int s2n_config_setup_tls13(struct s2n_config *config)
{
- GUARD(s2n_config_set_cipher_preferences(config, "default_tls13"));
+ POSIX_GUARD(s2n_config_set_cipher_preferences(config, "default_tls13"));
return S2N_SUCCESS;
}
static int s2n_config_setup_fips(struct s2n_config *config)
{
- GUARD(s2n_config_set_cipher_preferences(config, "default_fips"));
+ POSIX_GUARD(s2n_config_set_cipher_preferences(config, "default_fips"));
return S2N_SUCCESS;
}
static int s2n_config_init(struct s2n_config *config)
{
- config->cert_allocated = 0;
- config->dhparams = NULL;
- memset(&config->application_protocols, 0, sizeof(config->application_protocols));
config->status_request_type = S2N_STATUS_REQUEST_NONE;
config->wall_clock = wall_clock;
config->monotonic_clock = monotonic_clock;
- config->verify_host = NULL;
- config->data_for_verify_host = NULL;
- config->client_hello_cb = NULL;
- config->client_hello_cb_ctx = NULL;
- config->cache_store = NULL;
- config->cache_store_data = NULL;
- config->cache_retrieve = NULL;
- config->cache_retrieve_data = NULL;
- config->cache_delete = NULL;
- config->cache_delete_data = NULL;
config->ct_type = S2N_CT_SUPPORT_NONE;
config->mfl_code = S2N_TLS_MAX_FRAG_LEN_EXT_NONE;
config->alert_behavior = S2N_ALERT_FAIL_ON_WARNINGS;
- config->accept_mfl = 0;
config->session_state_lifetime_in_nanos = S2N_STATE_LIFETIME_IN_NANOS;
- config->use_tickets = 0;
- config->use_session_cache = 0;
- config->ticket_keys = NULL;
- config->ticket_key_hashes = NULL;
config->encrypt_decrypt_key_lifetime_in_nanos = S2N_TICKET_ENCRYPT_DECRYPT_KEY_LIFETIME_IN_NANOS;
config->decrypt_key_lifetime_in_nanos = S2N_TICKET_DECRYPT_KEY_LIFETIME_IN_NANOS;
- config->quic_enabled = 0;
+ config->async_pkey_validation_mode = S2N_ASYNC_PKEY_VALIDATION_FAST;
/* By default, only the client will authenticate the Server's Certificate. The Server does not request or
* authenticate any client certificates. */
config->client_cert_auth_type = S2N_CERT_AUTH_NONE;
config->check_ocsp = 1;
- config->disable_x509_validation = 0;
- config->max_verify_cert_chain_depth = 0;
- config->max_verify_cert_chain_depth_set = 0;
- config->cert_tiebreak_cb = NULL;
- config->async_pkey_cb = NULL;
- config->psk_selection_cb = NULL;
- config->cert_req_dss_legacy_compat_enabled = 0;
-
- GUARD(s2n_config_setup_default(config));
+
+ config->client_hello_cb_mode = S2N_CLIENT_HELLO_CB_BLOCKING;
+
+ POSIX_GUARD(s2n_config_setup_default(config));
if (s2n_use_default_tls13_config()) {
- GUARD(s2n_config_setup_tls13(config));
+ POSIX_GUARD(s2n_config_setup_tls13(config));
} else if (s2n_is_in_fips_mode()) {
- GUARD(s2n_config_setup_fips(config));
+ POSIX_GUARD(s2n_config_setup_fips(config));
}
- notnull_check(config->domain_name_to_cert_map = s2n_map_new_with_initial_capacity(1));
- GUARD_AS_POSIX(s2n_map_complete(config->domain_name_to_cert_map));
- memset(&config->default_certs_by_type, 0, sizeof(struct certs_by_type));
- config->default_certs_are_explicit = 0;
+ POSIX_GUARD_PTR(config->domain_name_to_cert_map = s2n_map_new_with_initial_capacity(1));
+ POSIX_GUARD_RESULT(s2n_map_complete(config->domain_name_to_cert_map));
s2n_x509_trust_store_init_empty(&config->trust_store);
- s2n_x509_trust_store_from_system_defaults(&config->trust_store);
+ POSIX_GUARD(s2n_x509_trust_store_from_system_defaults(&config->trust_store));
return 0;
}
@@ -149,11 +125,11 @@ static int s2n_config_cleanup(struct s2n_config *config)
s2n_x509_trust_store_wipe(&config->trust_store);
config->check_ocsp = 0;
- GUARD(s2n_config_free_session_ticket_keys(config));
- GUARD(s2n_config_free_cert_chain_and_key(config));
- GUARD(s2n_config_free_dhparams(config));
- GUARD(s2n_free(&config->application_protocols));
- GUARD_AS_POSIX(s2n_map_free(config->domain_name_to_cert_map));
+ POSIX_GUARD(s2n_config_free_session_ticket_keys(config));
+ POSIX_GUARD(s2n_config_free_cert_chain_and_key(config));
+ POSIX_GUARD(s2n_config_free_dhparams(config));
+ POSIX_GUARD(s2n_free(&config->application_protocols));
+ POSIX_GUARD_RESULT(s2n_map_free(config->domain_name_to_cert_map));
return 0;
}
@@ -162,8 +138,8 @@ static int s2n_config_update_domain_name_to_cert_map(struct s2n_config *config,
struct s2n_blob *name,
struct s2n_cert_chain_and_key *cert_key_pair)
{
- notnull_check(config);
- notnull_check(name);
+ POSIX_ENSURE_REF(config);
+ POSIX_ENSURE_REF(name);
struct s2n_map *domain_name_to_cert_map = config->domain_name_to_cert_map;
/* s2n_map does not allow zero-size key */
@@ -173,16 +149,16 @@ static int s2n_config_update_domain_name_to_cert_map(struct s2n_config *config,
s2n_pkey_type cert_type = s2n_cert_chain_and_key_get_pkey_type(cert_key_pair);
struct s2n_blob s2n_map_value = { 0 };
bool key_found = false;
- GUARD_AS_POSIX(s2n_map_lookup(domain_name_to_cert_map, name, &s2n_map_value, &key_found));
+ POSIX_GUARD_RESULT(s2n_map_lookup(domain_name_to_cert_map, name, &s2n_map_value, &key_found));
if (!key_found) {
struct certs_by_type value = {{ 0 }};
value.certs[cert_type] = cert_key_pair;
s2n_map_value.data = (uint8_t *) &value;
s2n_map_value.size = sizeof(struct certs_by_type);
- GUARD_AS_POSIX(s2n_map_unlock(domain_name_to_cert_map));
- GUARD_AS_POSIX(s2n_map_add(domain_name_to_cert_map, name, &s2n_map_value));
- GUARD_AS_POSIX(s2n_map_complete(domain_name_to_cert_map));
+ POSIX_GUARD_RESULT(s2n_map_unlock(domain_name_to_cert_map));
+ POSIX_GUARD_RESULT(s2n_map_add(domain_name_to_cert_map, name, &s2n_map_value));
+ POSIX_GUARD_RESULT(s2n_map_complete(domain_name_to_cert_map));
} else {
struct certs_by_type *value = (void *) s2n_map_value.data;
if (value->certs[cert_type] == NULL) {
@@ -211,21 +187,21 @@ static int s2n_config_build_domain_name_to_cert_map(struct s2n_config *config, s
{
uint32_t cn_len = 0;
- GUARD_AS_POSIX(s2n_array_num_elements(cert_key_pair->cn_names, &cn_len));
+ POSIX_GUARD_RESULT(s2n_array_num_elements(cert_key_pair->cn_names, &cn_len));
uint32_t san_len = 0;
- GUARD_AS_POSIX(s2n_array_num_elements(cert_key_pair->san_names, &san_len));
+ POSIX_GUARD_RESULT(s2n_array_num_elements(cert_key_pair->san_names, &san_len));
if (san_len == 0) {
for (uint32_t i = 0; i < cn_len; i++) {
struct s2n_blob *cn_name = NULL;
- GUARD_AS_POSIX(s2n_array_get(cert_key_pair->cn_names, i, (void **)&cn_name));
- GUARD(s2n_config_update_domain_name_to_cert_map(config, cn_name, cert_key_pair));
+ POSIX_GUARD_RESULT(s2n_array_get(cert_key_pair->cn_names, i, (void **)&cn_name));
+ POSIX_GUARD(s2n_config_update_domain_name_to_cert_map(config, cn_name, cert_key_pair));
}
} else {
for (uint32_t i = 0; i < san_len; i++) {
struct s2n_blob *san_name = NULL;
- GUARD_AS_POSIX(s2n_array_get(cert_key_pair->san_names, i, (void **)&san_name));
- GUARD(s2n_config_update_domain_name_to_cert_map(config, san_name, cert_key_pair));
+ POSIX_GUARD_RESULT(s2n_array_get(cert_key_pair->san_names, i, (void **)&san_name));
+ POSIX_GUARD(s2n_config_update_domain_name_to_cert_map(config, san_name, cert_key_pair));
}
}
@@ -255,25 +231,27 @@ int s2n_config_set_unsafe_for_testing(struct s2n_config *config)
int s2n_config_defaults_init(void)
{
- /* Set up default */
- GUARD(s2n_config_init(&s2n_default_config));
- GUARD(s2n_config_setup_default(&s2n_default_config));
-
/* Set up fips defaults */
- GUARD(s2n_config_init(&s2n_default_fips_config));
- GUARD(s2n_config_setup_fips(&s2n_default_fips_config));
+ if (s2n_is_in_fips_mode()) {
+ POSIX_GUARD(s2n_config_init(&s2n_default_fips_config));
+ POSIX_GUARD(s2n_config_setup_fips(&s2n_default_fips_config));
+ } else {
+ /* Set up default */
+ POSIX_GUARD(s2n_config_init(&s2n_default_config));
+ POSIX_GUARD(s2n_config_setup_default(&s2n_default_config));
+ }
/* Set up TLS 1.3 defaults */
- GUARD(s2n_config_init(&s2n_default_tls13_config));
- GUARD(s2n_config_setup_tls13(&s2n_default_tls13_config));
+ POSIX_GUARD(s2n_config_init(&s2n_default_tls13_config));
+ POSIX_GUARD(s2n_config_setup_tls13(&s2n_default_tls13_config));
return S2N_SUCCESS;
}
void s2n_wipe_static_configs(void)
{
- s2n_config_cleanup(&s2n_default_config);
s2n_config_cleanup(&s2n_default_fips_config);
+ s2n_config_cleanup(&s2n_default_config);
s2n_config_cleanup(&s2n_default_tls13_config);
}
@@ -282,7 +260,8 @@ struct s2n_config *s2n_config_new(void)
struct s2n_blob allocator = {0};
struct s2n_config *new_config;
- GUARD_PTR(s2n_alloc(&allocator, sizeof(struct s2n_config)));
+ PTR_GUARD_POSIX(s2n_alloc(&allocator, sizeof(struct s2n_config)));
+ PTR_GUARD_POSIX(s2n_blob_zero(&allocator));
new_config = (struct s2n_config *)(void *)allocator.data;
if (s2n_config_init(new_config) != S2N_SUCCESS) {
@@ -310,11 +289,11 @@ static int s2n_verify_unique_ticket_key_comparator(const void *a, const void *b)
int s2n_config_init_session_ticket_keys(struct s2n_config *config)
{
if (config->ticket_keys == NULL) {
- notnull_check(config->ticket_keys = s2n_set_new(sizeof(struct s2n_ticket_key), s2n_config_store_ticket_key_comparator));
+ POSIX_ENSURE_REF(config->ticket_keys = s2n_set_new(sizeof(struct s2n_ticket_key), s2n_config_store_ticket_key_comparator));
}
if (config->ticket_key_hashes == NULL) {
- notnull_check(config->ticket_key_hashes = s2n_set_new(SHA_DIGEST_LENGTH, s2n_verify_unique_ticket_key_comparator));
+ POSIX_ENSURE_REF(config->ticket_key_hashes = s2n_set_new(SHA_DIGEST_LENGTH, s2n_verify_unique_ticket_key_comparator));
}
return 0;
@@ -323,11 +302,11 @@ int s2n_config_init_session_ticket_keys(struct s2n_config *config)
int s2n_config_free_session_ticket_keys(struct s2n_config *config)
{
if (config->ticket_keys != NULL) {
- GUARD_AS_POSIX(s2n_set_free_p(&config->ticket_keys));
+ POSIX_GUARD_RESULT(s2n_set_free_p(&config->ticket_keys));
}
if (config->ticket_key_hashes != NULL) {
- GUARD_AS_POSIX(s2n_set_free_p(&config->ticket_key_hashes));
+ POSIX_GUARD_RESULT(s2n_set_free_p(&config->ticket_key_hashes));
}
return 0;
@@ -349,39 +328,47 @@ int s2n_config_free_cert_chain_and_key(struct s2n_config *config)
int s2n_config_free_dhparams(struct s2n_config *config)
{
if (config->dhparams) {
- GUARD(s2n_dh_params_free(config->dhparams));
+ POSIX_GUARD(s2n_dh_params_free(config->dhparams));
}
- GUARD(s2n_free_object((uint8_t **)&config->dhparams, sizeof(struct s2n_dh_params)));
+ POSIX_GUARD(s2n_free_object((uint8_t **)&config->dhparams, sizeof(struct s2n_dh_params)));
return 0;
}
+S2N_CLEANUP_RESULT s2n_config_ptr_free(struct s2n_config **config)
+{
+ RESULT_ENSURE_REF(config);
+ RESULT_GUARD_POSIX(s2n_config_free(*config));
+ *config = NULL;
+ return S2N_RESULT_OK;
+}
+
int s2n_config_free(struct s2n_config *config)
{
s2n_config_cleanup(config);
- GUARD(s2n_free_object((uint8_t **)&config, sizeof(struct s2n_config)));
+ POSIX_GUARD(s2n_free_object((uint8_t **)&config, sizeof(struct s2n_config)));
return 0;
}
int s2n_config_get_client_auth_type(struct s2n_config *config, s2n_cert_auth_type *client_auth_type)
{
- notnull_check(config);
- notnull_check(client_auth_type);
+ POSIX_ENSURE_REF(config);
+ POSIX_ENSURE_REF(client_auth_type);
*client_auth_type = config->client_cert_auth_type;
return 0;
}
int s2n_config_set_client_auth_type(struct s2n_config *config, s2n_cert_auth_type client_auth_type)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->client_cert_auth_type = client_auth_type;
return 0;
}
int s2n_config_set_ct_support_level(struct s2n_config *config, s2n_ct_support_level type)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->ct_type = type;
return 0;
@@ -389,7 +376,7 @@ int s2n_config_set_ct_support_level(struct s2n_config *config, s2n_ct_support_le
int s2n_config_set_alert_behavior(struct s2n_config *config, s2n_alert_behavior alert_behavior)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
switch (alert_behavior) {
case S2N_ALERT_FAIL_ON_WARNINGS:
@@ -397,7 +384,7 @@ int s2n_config_set_alert_behavior(struct s2n_config *config, s2n_alert_behavior
config->alert_behavior = alert_behavior;
break;
default:
- S2N_ERROR(S2N_ERR_INVALID_ARGUMENT);
+ POSIX_BAIL(S2N_ERR_INVALID_ARGUMENT);
}
return 0;
@@ -405,7 +392,7 @@ int s2n_config_set_alert_behavior(struct s2n_config *config, s2n_alert_behavior
int s2n_config_set_verify_host_callback(struct s2n_config *config, s2n_verify_host_fn verify_host_fn, void *data)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->verify_host = verify_host_fn;
config->data_for_verify_host = data;
return 0;
@@ -413,7 +400,7 @@ int s2n_config_set_verify_host_callback(struct s2n_config *config, s2n_verify_ho
int s2n_config_set_check_stapled_ocsp_response(struct s2n_config *config, uint8_t check_ocsp)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
S2N_ERROR_IF(check_ocsp && !s2n_x509_ocsp_stapling_supported(), S2N_ERR_OCSP_NOT_SUPPORTED);
config->check_ocsp = check_ocsp;
return 0;
@@ -421,7 +408,7 @@ int s2n_config_set_check_stapled_ocsp_response(struct s2n_config *config, uint8_
int s2n_config_disable_x509_verification(struct s2n_config *config)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
s2n_x509_trust_store_wipe(&config->trust_store);
config->disable_x509_validation = 1;
return 0;
@@ -429,7 +416,7 @@ int s2n_config_disable_x509_verification(struct s2n_config *config)
int s2n_config_set_max_cert_chain_depth(struct s2n_config *config, uint16_t max_depth)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
S2N_ERROR_IF(max_depth == 0, S2N_ERR_INVALID_ARGUMENT);
config->max_verify_cert_chain_depth = max_depth;
@@ -442,25 +429,34 @@ int s2n_config_set_status_request_type(struct s2n_config *config, s2n_status_req
{
S2N_ERROR_IF(type == S2N_STATUS_REQUEST_OCSP && !s2n_x509_ocsp_stapling_supported(), S2N_ERR_OCSP_NOT_SUPPORTED);
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->status_request_type = type;
return 0;
}
+int s2n_config_wipe_trust_store(struct s2n_config *config)
+{
+ POSIX_ENSURE_REF(config);
+
+ s2n_x509_trust_store_wipe(&config->trust_store);
+
+ return S2N_SUCCESS;
+}
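
s2n_config_init() above now loads the system trust store by default (see the s2n_x509_trust_store_from_system_defaults call earlier in this file's diff), and the new s2n_config_wipe_trust_store() lets callers drop it again. A sketch of pairing it with a private CA, assuming both functions are exposed through api/s2n.h in this version; the PEM argument is a placeholder supplied by the caller.

#include <s2n.h>
#include <stddef.h>

static struct s2n_config *config_with_private_ca_only(const char *ca_pem)
{
    struct s2n_config *config = s2n_config_new();
    if (config == NULL) {
        return NULL;
    }
    if (s2n_config_wipe_trust_store(config) < 0
            || s2n_config_add_pem_to_trust_store(config, ca_pem) < 0) {
        s2n_config_free(config);
        return NULL;
    }
    return config;
}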
+
int s2n_config_add_pem_to_trust_store(struct s2n_config *config, const char *pem)
{
- notnull_check(config);
- notnull_check(pem);
+ POSIX_ENSURE_REF(config);
+ POSIX_ENSURE_REF(pem);
- GUARD(s2n_x509_trust_store_add_pem(&config->trust_store, pem));
+ POSIX_GUARD(s2n_x509_trust_store_add_pem(&config->trust_store, pem));
return 0;
}
int s2n_config_set_verification_ca_location(struct s2n_config *config, const char *ca_pem_filename, const char *ca_dir)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
int err_code = s2n_x509_trust_store_from_ca_file(&config->trust_store, ca_pem_filename, ca_dir);
if (!err_code) {
@@ -474,9 +470,9 @@ int s2n_config_set_verification_ca_location(struct s2n_config *config, const cha
int s2n_config_add_cert_chain_and_key(struct s2n_config *config, const char *cert_chain_pem, const char *private_key_pem)
{
struct s2n_cert_chain_and_key *chain_and_key;
- notnull_check(chain_and_key = s2n_cert_chain_and_key_new());
- GUARD(s2n_cert_chain_and_key_load_pem(chain_and_key, cert_chain_pem, private_key_pem));
- GUARD(s2n_config_add_cert_chain_and_key_to_store(config, chain_and_key));
+ POSIX_ENSURE_REF(chain_and_key = s2n_cert_chain_and_key_new());
+ POSIX_GUARD(s2n_cert_chain_and_key_load_pem(chain_and_key, cert_chain_pem, private_key_pem));
+ POSIX_GUARD(s2n_config_add_cert_chain_and_key_to_store(config, chain_and_key));
config->cert_allocated = 1;
return 0;
@@ -484,26 +480,30 @@ int s2n_config_add_cert_chain_and_key(struct s2n_config *config, const char *cer
int s2n_config_add_cert_chain_and_key_to_store(struct s2n_config *config, struct s2n_cert_chain_and_key *cert_key_pair)
{
- notnull_check(config->domain_name_to_cert_map);
- notnull_check(cert_key_pair);
-
- GUARD(s2n_config_build_domain_name_to_cert_map(config, cert_key_pair));
+ POSIX_ENSURE_REF(config->domain_name_to_cert_map);
+ POSIX_ENSURE_REF(cert_key_pair);
+ s2n_pkey_type cert_type = s2n_cert_chain_and_key_get_pkey_type(cert_key_pair);
+ config->is_rsa_cert_configured |= (cert_type == S2N_PKEY_TYPE_RSA);
+ POSIX_GUARD(s2n_config_build_domain_name_to_cert_map(config, cert_key_pair));
if (!config->default_certs_are_explicit) {
        /* Attempt to auto-set the defaults based on ordering, i.e. the first RSA cert added is the RSA default,
         * the first ECDSA cert added is the ECDSA default, etc. */
- s2n_pkey_type cert_type = s2n_cert_chain_and_key_get_pkey_type(cert_key_pair);
if (config->default_certs_by_type.certs[cert_type] == NULL) {
config->default_certs_by_type.certs[cert_type] = cert_key_pair;
}
}
- return 0;
+ if (s2n_pkey_check_key_exists(cert_key_pair->private_key) != S2N_SUCCESS) {
+ config->no_signing_key = true;
+ }
+
+ return S2N_SUCCESS;
}
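
/* Illustrative sketch (not part of the upstream diff): when defaults are not
 * set explicitly, the first certificate added per key type becomes the default
 * for that type, so ordering matters. The PEM strings are placeholders and
 * error paths are simplified. */
static int add_certs_in_order(struct s2n_config *config,
                              const char *rsa_chain_pem, const char *rsa_key_pem,
                              const char *ecdsa_chain_pem, const char *ecdsa_key_pem)
{
    struct s2n_cert_chain_and_key *rsa = s2n_cert_chain_and_key_new();
    struct s2n_cert_chain_and_key *ecdsa = s2n_cert_chain_and_key_new();
    if (rsa == NULL || ecdsa == NULL) { return -1; }

    if (s2n_cert_chain_and_key_load_pem(rsa, rsa_chain_pem, rsa_key_pem) != S2N_SUCCESS
        || s2n_cert_chain_and_key_load_pem(ecdsa, ecdsa_chain_pem, ecdsa_key_pem) != S2N_SUCCESS) {
        return -1;
    }

    /* First RSA cert added becomes the RSA default; first ECDSA cert the ECDSA default */
    if (s2n_config_add_cert_chain_and_key_to_store(config, rsa) != S2N_SUCCESS
        || s2n_config_add_cert_chain_and_key_to_store(config, ecdsa) != S2N_SUCCESS) {
        return -1;
    }
    return 0;
}
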
int s2n_config_set_async_pkey_callback(struct s2n_config *config, s2n_async_pkey_fn fn)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->async_pkey_cb = fn;
@@ -512,7 +512,7 @@ int s2n_config_set_async_pkey_callback(struct s2n_config *config, s2n_async_pkey
int s2n_config_clear_default_certificates(struct s2n_config *config)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
for (int i = 0; i < S2N_CERT_TYPE_COUNT; i++) {
config->default_certs_by_type.certs[i] = NULL;
}
@@ -523,23 +523,24 @@ int s2n_config_set_cert_chain_and_key_defaults(struct s2n_config *config,
struct s2n_cert_chain_and_key **cert_key_pairs,
uint32_t num_cert_key_pairs)
{
- notnull_check(config);
- notnull_check(cert_key_pairs);
+ POSIX_ENSURE_REF(config);
+ POSIX_ENSURE_REF(cert_key_pairs);
S2N_ERROR_IF(num_cert_key_pairs < 1 || num_cert_key_pairs > S2N_CERT_TYPE_COUNT,
S2N_ERR_NUM_DEFAULT_CERTIFICATES);
/* Validate certs being set before clearing auto-chosen defaults or previously set defaults */
struct certs_by_type new_defaults = {{ 0 }};
- for (int i = 0; i < num_cert_key_pairs; i++) {
- notnull_check(cert_key_pairs[i]);
+ for (uint32_t i = 0; i < num_cert_key_pairs; i++) {
+ POSIX_ENSURE_REF(cert_key_pairs[i]);
s2n_pkey_type cert_type = s2n_cert_chain_and_key_get_pkey_type(cert_key_pairs[i]);
S2N_ERROR_IF(new_defaults.certs[cert_type] != NULL, S2N_ERR_MULTIPLE_DEFAULT_CERTIFICATES_PER_AUTH_TYPE);
new_defaults.certs[cert_type] = cert_key_pairs[i];
}
- GUARD(s2n_config_clear_default_certificates(config));
- for (int i = 0; i < num_cert_key_pairs; i++) {
+ POSIX_GUARD(s2n_config_clear_default_certificates(config));
+ for (uint32_t i = 0; i < num_cert_key_pairs; i++) {
s2n_pkey_type cert_type = s2n_cert_chain_and_key_get_pkey_type(cert_key_pairs[i]);
+ config->is_rsa_cert_configured |= (cert_type == S2N_PKEY_TYPE_RSA);
config->default_certs_by_type.certs[cert_type] = cert_key_pairs[i];
}
@@ -555,7 +556,7 @@ int s2n_config_add_dhparams(struct s2n_config *config, const char *dhparams_pem)
struct s2n_blob mem = {0};
/* Allocate the memory for the chain and key struct */
- GUARD(s2n_alloc(&mem, sizeof(struct s2n_dh_params)));
+ POSIX_GUARD(s2n_alloc(&mem, sizeof(struct s2n_dh_params)));
config->dhparams = (struct s2n_dh_params *)(void *)mem.data;
if (s2n_stuffer_alloc_ro_from_string(&dhparams_in_stuffer, dhparams_pem) != S2N_SUCCESS) {
@@ -568,20 +569,20 @@ int s2n_config_add_dhparams(struct s2n_config *config, const char *dhparams_pem)
}
/* Convert pem to asn1 and asn1 to the private key */
- GUARD(s2n_stuffer_dhparams_from_pem(&dhparams_in_stuffer, &dhparams_out_stuffer));
+ POSIX_GUARD(s2n_stuffer_dhparams_from_pem(&dhparams_in_stuffer, &dhparams_out_stuffer));
dhparams_blob.size = s2n_stuffer_data_available(&dhparams_out_stuffer);
dhparams_blob.data = s2n_stuffer_raw_read(&dhparams_out_stuffer, dhparams_blob.size);
- notnull_check(dhparams_blob.data);
+ POSIX_ENSURE_REF(dhparams_blob.data);
- GUARD(s2n_pkcs3_to_dh_params(config->dhparams, &dhparams_blob));
+ POSIX_GUARD(s2n_pkcs3_to_dh_params(config->dhparams, &dhparams_blob));
return 0;
}
extern int s2n_config_set_wall_clock(struct s2n_config *config, s2n_clock_time_nanoseconds clock_fn, void *ctx)
{
- notnull_check(clock_fn);
+ POSIX_ENSURE_REF(clock_fn);
config->wall_clock = clock_fn;
config->sys_clock_ctx = ctx;
@@ -591,7 +592,7 @@ extern int s2n_config_set_wall_clock(struct s2n_config *config, s2n_clock_time_n
extern int s2n_config_set_monotonic_clock(struct s2n_config *config, s2n_clock_time_nanoseconds clock_fn, void *ctx)
{
- notnull_check(clock_fn);
+ POSIX_ENSURE_REF(clock_fn);
config->monotonic_clock = clock_fn;
config->monotonic_clock_ctx = ctx;
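
/* Illustrative sketch (not part of the upstream diff): overriding the wall
 * clock, e.g. for deterministic tests. The s2n_clock_time_nanoseconds
 * parameter list (context pointer plus output in nanoseconds) is an
 * assumption based on the callback typedef in api/s2n.h. */
static int fixed_wall_clock(void *ctx, uint64_t *nanoseconds)
{
    *nanoseconds = *(const uint64_t *) ctx;
    return S2N_SUCCESS;
}

static int use_fixed_clock(struct s2n_config *config, uint64_t *fixed_time_ns)
{
    return s2n_config_set_wall_clock(config, fixed_wall_clock, fixed_time_ns);
}
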
@@ -601,7 +602,7 @@ extern int s2n_config_set_monotonic_clock(struct s2n_config *config, s2n_clock_t
int s2n_config_set_cache_store_callback(struct s2n_config *config, s2n_cache_store_callback cache_store_callback, void *data)
{
- notnull_check(cache_store_callback);
+ POSIX_ENSURE_REF(cache_store_callback);
config->cache_store = cache_store_callback;
config->cache_store_data = data;
@@ -611,7 +612,7 @@ int s2n_config_set_cache_store_callback(struct s2n_config *config, s2n_cache_sto
int s2n_config_set_cache_retrieve_callback(struct s2n_config *config, s2n_cache_retrieve_callback cache_retrieve_callback, void *data)
{
- notnull_check(cache_retrieve_callback);
+ POSIX_ENSURE_REF(cache_retrieve_callback);
config->cache_retrieve = cache_retrieve_callback;
config->cache_retrieve_data = data;
@@ -621,7 +622,7 @@ int s2n_config_set_cache_retrieve_callback(struct s2n_config *config, s2n_cache_
int s2n_config_set_cache_delete_callback(struct s2n_config *config, s2n_cache_delete_callback cache_delete_callback, void *data)
{
- notnull_check(cache_delete_callback);
+ POSIX_ENSURE_REF(cache_delete_callback);
config->cache_delete = cache_delete_callback;
config->cache_delete_data = data;
@@ -631,25 +632,25 @@ int s2n_config_set_cache_delete_callback(struct s2n_config *config, s2n_cache_de
int s2n_config_set_extension_data(struct s2n_config *config, s2n_tls_extension_type type, const uint8_t *data, uint32_t length)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
if (s2n_config_get_num_default_certs(config) == 0) {
- S2N_ERROR(S2N_ERR_UPDATING_EXTENSION);
+ POSIX_BAIL(S2N_ERR_UPDATING_EXTENSION);
}
struct s2n_cert_chain_and_key *config_chain_and_key = s2n_config_get_single_default_cert(config);
- notnull_check(config_chain_and_key);
+ POSIX_ENSURE_REF(config_chain_and_key);
switch (type) {
case S2N_EXTENSION_CERTIFICATE_TRANSPARENCY:
{
- GUARD(s2n_cert_chain_and_key_set_sct_list(config_chain_and_key, data, length));
+ POSIX_GUARD(s2n_cert_chain_and_key_set_sct_list(config_chain_and_key, data, length));
} break;
case S2N_EXTENSION_OCSP_STAPLING:
{
- GUARD(s2n_cert_chain_and_key_set_ocsp_data(config_chain_and_key, data, length));
+ POSIX_GUARD(s2n_cert_chain_and_key_set_ocsp_data(config_chain_and_key, data, length));
} break;
default:
- S2N_ERROR(S2N_ERR_UNRECOGNIZED_EXTENSION);
+ POSIX_BAIL(S2N_ERR_UNRECOGNIZED_EXTENSION);
}
return 0;
@@ -657,15 +658,26 @@ int s2n_config_set_extension_data(struct s2n_config *config, s2n_tls_extension_t
int s2n_config_set_client_hello_cb(struct s2n_config *config, s2n_client_hello_fn client_hello_cb, void *ctx)
{
+ POSIX_ENSURE_REF(config);
+
config->client_hello_cb = client_hello_cb;
config->client_hello_cb_ctx = ctx;
-
return 0;
}
+int s2n_config_set_client_hello_cb_mode(struct s2n_config *config, s2n_client_hello_cb_mode cb_mode)
+{
+ POSIX_ENSURE_REF(config);
+ POSIX_ENSURE(cb_mode == S2N_CLIENT_HELLO_CB_BLOCKING ||
+ cb_mode == S2N_CLIENT_HELLO_CB_NONBLOCKING, S2N_ERR_INVALID_STATE);
+
+ config->client_hello_cb_mode = cb_mode;
+ return S2N_SUCCESS;
+}
+
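
/* Illustrative sketch (not part of the upstream diff): registering a
 * ClientHello callback and switching it to the new non-blocking mode. In
 * non-blocking mode the handshake pauses until the application marks the
 * callback as done (the s2n_client_hello_cb_done() API is assumed here). */
static int my_client_hello_cb(struct s2n_connection *conn, void *ctx)
{
    /* Inspect the ClientHello, e.g. pick a config based on SNI. Work can be
     * deferred and completed later when running in non-blocking mode. */
    (void) conn;
    (void) ctx;
    return S2N_SUCCESS;
}

static int configure_client_hello_cb(struct s2n_config *config)
{
    if (s2n_config_set_client_hello_cb(config, my_client_hello_cb, NULL) != S2N_SUCCESS) {
        return -1;
    }
    return s2n_config_set_client_hello_cb_mode(config, S2N_CLIENT_HELLO_CB_NONBLOCKING);
}
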
int s2n_config_send_max_fragment_length(struct s2n_config *config, s2n_max_frag_len mfl_code)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
S2N_ERROR_IF(mfl_code > S2N_TLS_MAX_FRAG_LEN_4096, S2N_ERR_INVALID_MAX_FRAG_LEN);
@@ -676,7 +688,7 @@ int s2n_config_send_max_fragment_length(struct s2n_config *config, s2n_max_frag_
int s2n_config_accept_max_fragment_length(struct s2n_config *config)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->accept_mfl = 1;
@@ -686,7 +698,7 @@ int s2n_config_accept_max_fragment_length(struct s2n_config *config)
int s2n_config_set_session_state_lifetime(struct s2n_config *config,
uint64_t lifetime_in_secs)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->session_state_lifetime_in_nanos = (lifetime_in_secs * ONE_SEC_IN_NANOS);
return 0;
@@ -694,7 +706,7 @@ int s2n_config_set_session_state_lifetime(struct s2n_config *config,
int s2n_config_set_session_tickets_onoff(struct s2n_config *config, uint8_t enabled)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
if (config->use_tickets == enabled) {
return 0;
@@ -702,11 +714,18 @@ int s2n_config_set_session_tickets_onoff(struct s2n_config *config, uint8_t enab
config->use_tickets = enabled;
+ if (config->initial_tickets_to_send == 0) {
+ /* Normally initial_tickets_to_send is set via s2n_config_set_initial_ticket_count.
+ * However, s2n_config_set_initial_ticket_count calls this method.
+ * So we set initial_tickets_to_send directly to avoid infinite recursion. */
+ config->initial_tickets_to_send = 1;
+ }
+
/* session ticket || session id is enabled */
if (enabled) {
- GUARD(s2n_config_init_session_ticket_keys(config));
+ POSIX_GUARD(s2n_config_init_session_ticket_keys(config));
} else if (!config->use_session_cache) {
- GUARD(s2n_config_free_session_ticket_keys(config));
+ POSIX_GUARD(s2n_config_free_session_ticket_keys(config));
}
return 0;
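
/* Illustrative sketch (not part of the upstream diff): enabling session
 * tickets implicitly arranges for one NewSessionTicket to be sent (via
 * initial_tickets_to_send) and requires at least one ticket encryption key.
 * `ticket_key` would come from a secure random source in practice. */
static int enable_session_tickets(struct s2n_config *config)
{
    uint8_t ticket_key[32] = { 0 };          /* placeholder key material */
    const uint8_t key_name[] = "2022-key-1"; /* unique name per key */

    if (s2n_config_set_session_tickets_onoff(config, 1) != S2N_SUCCESS) {
        return -1;
    }
    /* An intro time of 0 means "now" per the implementation above */
    return s2n_config_add_ticket_crypto_key(config, key_name, sizeof(key_name) - 1,
                                            ticket_key, sizeof(ticket_key), 0);
}
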
@@ -714,14 +733,14 @@ int s2n_config_set_session_tickets_onoff(struct s2n_config *config, uint8_t enab
int s2n_config_set_session_cache_onoff(struct s2n_config *config, uint8_t enabled)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
if (enabled && config->cache_store && config->cache_retrieve && config->cache_delete) {
- GUARD(s2n_config_init_session_ticket_keys(config));
+ POSIX_GUARD(s2n_config_init_session_ticket_keys(config));
config->use_session_cache = 1;
}
else {
if (!config->use_tickets) {
- GUARD(s2n_config_free_session_ticket_keys(config));
+ POSIX_GUARD(s2n_config_free_session_ticket_keys(config));
}
config->use_session_cache = 0;
}
@@ -731,7 +750,7 @@ int s2n_config_set_session_cache_onoff(struct s2n_config *config, uint8_t enable
int s2n_config_set_ticket_encrypt_decrypt_key_lifetime(struct s2n_config *config,
uint64_t lifetime_in_secs)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->encrypt_decrypt_key_lifetime_in_nanos = (lifetime_in_secs * ONE_SEC_IN_NANOS);
return 0;
@@ -740,7 +759,7 @@ int s2n_config_set_ticket_encrypt_decrypt_key_lifetime(struct s2n_config *config
int s2n_config_set_ticket_decrypt_key_lifetime(struct s2n_config *config,
uint64_t lifetime_in_secs)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->decrypt_key_lifetime_in_nanos = (lifetime_in_secs * ONE_SEC_IN_NANOS);
return 0;
@@ -751,21 +770,21 @@ int s2n_config_add_ticket_crypto_key(struct s2n_config *config,
uint8_t *key, uint32_t key_len,
uint64_t intro_time_in_seconds_from_epoch)
{
- notnull_check(config);
- notnull_check(name);
- notnull_check(key);
+ POSIX_ENSURE_REF(config);
+ POSIX_ENSURE_REF(name);
+ POSIX_ENSURE_REF(key);
/* both session ticket and session cache encryption/decryption can use the same key mechanism */
if (!config->use_tickets && !config->use_session_cache) {
return 0;
}
- GUARD(s2n_config_wipe_expired_ticket_crypto_keys(config, -1));
+ POSIX_GUARD(s2n_config_wipe_expired_ticket_crypto_keys(config, -1));
S2N_ERROR_IF(key_len == 0, S2N_ERR_INVALID_TICKET_KEY_LENGTH);
uint32_t ticket_keys_len = 0;
- GUARD_AS_POSIX(s2n_set_len(config->ticket_keys, &ticket_keys_len));
+ POSIX_GUARD_RESULT(s2n_set_len(config->ticket_keys, &ticket_keys_len));
S2N_ERROR_IF(ticket_keys_len >= S2N_MAX_TICKET_KEYS, S2N_ERR_TICKET_KEY_LIMIT);
S2N_ERROR_IF(name_len == 0 || name_len > S2N_TICKET_KEY_NAME_LEN || s2n_find_ticket_key(config, name), S2N_ERR_INVALID_TICKET_KEY_NAME_OR_NAME_LENGTH);
@@ -778,45 +797,45 @@ int s2n_config_add_ticket_crypto_key(struct s2n_config *config,
struct s2n_ticket_key *session_ticket_key;
DEFER_CLEANUP(struct s2n_blob allocator = {0}, s2n_free);
- GUARD(s2n_alloc(&allocator, sizeof(struct s2n_ticket_key)));
+ POSIX_GUARD(s2n_alloc(&allocator, sizeof(struct s2n_ticket_key)));
session_ticket_key = (struct s2n_ticket_key *) (void *) allocator.data;
DEFER_CLEANUP(struct s2n_hmac_state hmac = {0}, s2n_hmac_free);
- GUARD(s2n_hmac_new(&hmac));
- GUARD(s2n_hkdf(&hmac, S2N_HMAC_SHA256, &salt, &in_key, &info, &out_key));
+ POSIX_GUARD(s2n_hmac_new(&hmac));
+ POSIX_GUARD(s2n_hkdf(&hmac, S2N_HMAC_SHA256, &salt, &in_key, &info, &out_key));
DEFER_CLEANUP(struct s2n_hash_state hash = {0}, s2n_hash_free);
uint8_t hash_output[SHA_DIGEST_LENGTH];
- GUARD(s2n_hash_new(&hash));
- GUARD(s2n_hash_init(&hash, S2N_HASH_SHA1));
- GUARD(s2n_hash_update(&hash, out_key.data, out_key.size));
- GUARD(s2n_hash_digest(&hash, hash_output, SHA_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_new(&hash));
+ POSIX_GUARD(s2n_hash_init(&hash, S2N_HASH_SHA1));
+ POSIX_GUARD(s2n_hash_update(&hash, out_key.data, out_key.size));
+ POSIX_GUARD(s2n_hash_digest(&hash, hash_output, SHA_DIGEST_LENGTH));
- GUARD_AS_POSIX(s2n_set_len(config->ticket_keys, &ticket_keys_len));
+ POSIX_GUARD_RESULT(s2n_set_len(config->ticket_keys, &ticket_keys_len));
if (ticket_keys_len >= S2N_MAX_TICKET_KEY_HASHES) {
- GUARD_AS_POSIX(s2n_set_free_p(&config->ticket_key_hashes));
- notnull_check(config->ticket_key_hashes = s2n_set_new(SHA_DIGEST_LENGTH, s2n_verify_unique_ticket_key_comparator));
+ POSIX_GUARD_RESULT(s2n_set_free_p(&config->ticket_key_hashes));
+ POSIX_ENSURE_REF(config->ticket_key_hashes = s2n_set_new(SHA_DIGEST_LENGTH, s2n_verify_unique_ticket_key_comparator));
}
/* Insert hash key into a sorted array at known index */
- GUARD_AS_POSIX(s2n_set_add(config->ticket_key_hashes, hash_output));
+ POSIX_GUARD_RESULT(s2n_set_add(config->ticket_key_hashes, hash_output));
- memcpy_check(session_ticket_key->key_name, name, S2N_TICKET_KEY_NAME_LEN);
- memcpy_check(session_ticket_key->aes_key, out_key.data, S2N_AES256_KEY_LEN);
+ POSIX_CHECKED_MEMCPY(session_ticket_key->key_name, name, S2N_TICKET_KEY_NAME_LEN);
+ POSIX_CHECKED_MEMCPY(session_ticket_key->aes_key, out_key.data, S2N_AES256_KEY_LEN);
out_key.data = output_pad + S2N_AES256_KEY_LEN;
- memcpy_check(session_ticket_key->implicit_aad, out_key.data, S2N_TICKET_AAD_IMPLICIT_LEN);
+ POSIX_CHECKED_MEMCPY(session_ticket_key->implicit_aad, out_key.data, S2N_TICKET_AAD_IMPLICIT_LEN);
if (intro_time_in_seconds_from_epoch == 0) {
uint64_t now;
- GUARD(config->wall_clock(config->sys_clock_ctx, &now));
+ POSIX_GUARD(config->wall_clock(config->sys_clock_ctx, &now));
session_ticket_key->intro_timestamp = now;
} else {
session_ticket_key->intro_timestamp = (intro_time_in_seconds_from_epoch * ONE_SEC_IN_NANOS);
}
- GUARD(s2n_config_store_ticket_key(config, session_ticket_key));
+ POSIX_GUARD(s2n_config_store_ticket_key(config, session_ticket_key));
return 0;
}
@@ -829,7 +848,7 @@ int s2n_config_set_cert_tiebreak_callback(struct s2n_config *config, s2n_cert_ti
struct s2n_cert_chain_and_key *s2n_config_get_single_default_cert(struct s2n_config *config)
{
- notnull_check_ptr(config);
+ PTR_ENSURE_REF(config);
struct s2n_cert_chain_and_key *cert = NULL;
for (int i = S2N_CERT_TYPE_COUNT - 1; i >= 0; i--) {
@@ -842,7 +861,7 @@ struct s2n_cert_chain_and_key *s2n_config_get_single_default_cert(struct s2n_con
int s2n_config_get_num_default_certs(struct s2n_config *config)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
int num_certs = 0;
for (int i = 0; i < S2N_CERT_TYPE_COUNT; i++) {
if (config->default_certs_by_type.certs[i] != NULL) {
@@ -855,17 +874,67 @@ int s2n_config_get_num_default_certs(struct s2n_config *config)
int s2n_config_enable_cert_req_dss_legacy_compat(struct s2n_config *config)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->cert_req_dss_legacy_compat_enabled = 1;
return S2N_SUCCESS;
}
-int s2n_config_set_psk_selection_callback(struct s2n_connection *conn, s2n_psk_selection_callback cb)
+int s2n_config_set_psk_selection_callback(struct s2n_config *config, s2n_psk_selection_callback cb, void *context)
{
- notnull_check(conn);
- notnull_check(cb);
+ POSIX_ENSURE_REF(config);
+ config->psk_selection_cb = cb;
+ config->psk_selection_ctx = context;
+ return S2N_SUCCESS;
+}
+
+int s2n_config_set_key_log_cb(struct s2n_config *config, s2n_key_log_fn callback, void *ctx) {
+ POSIX_ENSURE_MUT(config);
+
+ config->key_log_cb = callback;
+ config->key_log_ctx = ctx;
+
+ return S2N_SUCCESS;
+}
+
+int s2n_config_set_async_pkey_validation_mode(struct s2n_config *config, s2n_async_pkey_validation_mode mode) {
+ POSIX_ENSURE_REF(config);
+
+ switch(mode) {
+ case S2N_ASYNC_PKEY_VALIDATION_FAST:
+ case S2N_ASYNC_PKEY_VALIDATION_STRICT:
+ config->async_pkey_validation_mode = mode;
+ return S2N_SUCCESS;
+ }
+
+ POSIX_BAIL(S2N_ERR_INVALID_ARGUMENT);
+}
+
+int s2n_config_set_ctx(struct s2n_config *config, void *ctx) {
+ POSIX_ENSURE_REF(config);
+
+ config->context = ctx;
+
+ return S2N_SUCCESS;
+}
+
+int s2n_config_get_ctx(struct s2n_config *config, void **ctx) {
+ POSIX_ENSURE_REF(config);
+ POSIX_ENSURE_REF(ctx);
+
+ *ctx = config->context;
+
+ return S2N_SUCCESS;
+}
+
+/*
+ * Set the client_hello callback behavior to polling.
+ *
+ * Polling means that the callback function can be called multiple times.
+ */
+int s2n_config_client_hello_cb_enable_poll(struct s2n_config *config) {
+ POSIX_ENSURE_REF(config);
- conn->config->psk_selection_cb = cb;
+ config->client_hello_cb_enable_poll = 1;
return S2N_SUCCESS;
}
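
/* Illustrative sketch (not part of the upstream diff): wiring the new
 * s2n_config_set_key_log_cb() to dump NSS-style key log lines (e.g. for
 * Wireshark). The s2n_key_log_fn parameter list shown here is an assumption
 * based on the callback typedef in api/s2n.h. */
#include <stdio.h>

static int key_log_to_file(void *ctx, struct s2n_connection *conn,
                           uint8_t *logline, size_t len)
{
    (void) conn;
    FILE *out = (FILE *) ctx;
    fwrite(logline, 1, len, out);
    fputc('\n', out);
    return S2N_SUCCESS;
}

static int enable_key_logging(struct s2n_config *config, FILE *keylog_file)
{
    return s2n_config_set_key_log_cb(config, key_log_to_file, keylog_file);
}
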
diff --git a/contrib/restricted/aws/s2n/tls/s2n_config.h b/contrib/restricted/aws/s2n/tls/s2n_config.h
index c5edf02418..c0068ce133 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_config.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_config.h
@@ -18,11 +18,11 @@
#include "api/s2n.h"
#include "crypto/s2n_certificate.h"
#include "crypto/s2n_dhe.h"
+#include "tls/s2n_psk.h"
#include "tls/s2n_resume.h"
#include "tls/s2n_x509_validator.h"
#include "utils/s2n_blob.h"
#include "utils/s2n_set.h"
-#include "tls/s2n_psk.h"
#define S2N_MAX_TICKET_KEYS 48
#define S2N_MAX_TICKET_KEY_HASHES 500 /* 10KB */
@@ -30,21 +30,37 @@
struct s2n_cipher_preferences;
struct s2n_config {
+ unsigned use_tickets:1;
+
+ /* Whether a connection can be used by a QUIC implementation.
+ * See s2n_quic_support.h */
+ unsigned quic_enabled:1;
+
unsigned cert_allocated:1;
unsigned default_certs_are_explicit:1;
- unsigned use_tickets:1;
unsigned use_session_cache:1;
/* if this is FALSE, server will ignore client's Maximum Fragment Length request */
unsigned accept_mfl:1;
unsigned check_ocsp:1;
unsigned disable_x509_validation:1;
unsigned max_verify_cert_chain_depth_set:1;
- /* Whether a connection can be used by a QUIC implementation.
- * See s2n_quic_support.h */
- unsigned quic_enabled:1;
/* Whether to add dss cert type during a server certificate request.
- * See https://github.com/awslabs/s2n/blob/main/docs/USAGE-GUIDE.md */
+ * See https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md */
unsigned cert_req_dss_legacy_compat_enabled:1;
+ /* Whether any RSA certificates have been configured server-side to send to clients. This is needed so that the
+     * server knows whether or not to self-downgrade to TLS 1.2 if the server is compiled with OpenSSL 1.0.2 and does
+ * not support RSA PSS signing (which is required for TLS 1.3). */
+ unsigned is_rsa_cert_configured:1;
+ /* It's possible to use a certificate without loading the private key,
+ * but async signing must be enabled. Use this flag to enforce that restriction.
+ */
+ unsigned no_signing_key:1;
+ /*
+ * This option exists to allow for polling the client_hello callback.
+ *
+ * Note: This defaults to false to ensure backwards compatibility.
+ */
+ unsigned client_hello_cb_enable_poll:1;
struct s2n_dh_params *dhparams;
/* Needed until we can deprecate s2n_config_add_cert_chain_and_key. This is
@@ -63,6 +79,8 @@ struct s2n_config {
void *monotonic_clock_ctx;
s2n_client_hello_fn *client_hello_cb;
+ s2n_client_hello_cb_mode client_hello_cb_mode;
+
void *client_hello_cb_ctx;
uint64_t session_state_lifetime_in_nanos;
@@ -98,13 +116,36 @@ struct s2n_config {
uint8_t mfl_code;
+ uint8_t initial_tickets_to_send;
+
struct s2n_x509_trust_store trust_store;
uint16_t max_verify_cert_chain_depth;
s2n_async_pkey_fn async_pkey_cb;
+
s2n_psk_selection_callback psk_selection_cb;
+ void *psk_selection_ctx;
+
+ s2n_key_log_fn key_log_cb;
+ void *key_log_ctx;
+
+ s2n_session_ticket_fn session_ticket_cb;
+ void *session_ticket_ctx;
+
+ s2n_early_data_cb early_data_cb;
+
+ uint32_t server_max_early_data_size;
+
+ s2n_psk_mode psk_mode;
+
+ s2n_async_pkey_validation_mode async_pkey_validation_mode;
+
+ /* The user defined context associated with config */
+ void *context;
};
+S2N_CLEANUP_RESULT s2n_config_ptr_free(struct s2n_config **config);
+
int s2n_config_defaults_init(void);
extern struct s2n_config *s2n_fetch_default_config(void);
int s2n_config_set_unsafe_for_testing(struct s2n_config *config);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_connection.c b/contrib/restricted/aws/s2n/tls/s2n_connection.c
index f2393fd2a1..cf6334164f 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_connection.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_connection.c
@@ -19,8 +19,9 @@
#include <strings.h>
#include <time.h>
#include <unistd.h>
+#include <sys/param.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include <stdbool.h>
#include "crypto/s2n_fips.h"
@@ -34,6 +35,7 @@
#include "tls/s2n_connection_evp_digests.h"
#include "tls/s2n_handshake.h"
#include "tls/s2n_kem.h"
+#include "tls/s2n_internal.h"
#include "tls/s2n_prf.h"
#include "tls/s2n_record.h"
#include "tls/s2n_resume.h"
@@ -43,6 +45,8 @@
#include "crypto/s2n_certificate.h"
#include "crypto/s2n_cipher.h"
+#include "crypto/s2n_crypto.h"
+#include "crypto/s2n_openssl_x509.h"
#include "utils/s2n_blob.h"
#include "utils/s2n_compiler.h"
@@ -55,83 +59,13 @@
#define S2N_SET_KEY_SHARE_LIST_EMPTY(keyshares) (keyshares |= 1)
#define S2N_SET_KEY_SHARE_REQUEST(keyshares, i) (keyshares |= ( 1 << ( i + 1 )))
-static int s2n_connection_new_hashes(struct s2n_connection *conn)
-{
- /* Allocate long-term memory for the Connection's hash states */
- GUARD(s2n_hash_new(&conn->handshake.md5));
- GUARD(s2n_hash_new(&conn->handshake.sha1));
- GUARD(s2n_hash_new(&conn->handshake.sha224));
- GUARD(s2n_hash_new(&conn->handshake.sha256));
- GUARD(s2n_hash_new(&conn->handshake.sha384));
- GUARD(s2n_hash_new(&conn->handshake.sha512));
- GUARD(s2n_hash_new(&conn->handshake.md5_sha1));
- GUARD(s2n_hash_new(&conn->handshake.ccv_hash_copy));
- GUARD(s2n_hash_new(&conn->handshake.prf_md5_hash_copy));
- GUARD(s2n_hash_new(&conn->handshake.prf_sha1_hash_copy));
- GUARD(s2n_hash_new(&conn->handshake.prf_tls12_hash_copy));
- GUARD(s2n_hash_new(&conn->handshake.server_finished_copy));
- GUARD(s2n_hash_new(&conn->prf_space.ssl3.md5));
- GUARD(s2n_hash_new(&conn->prf_space.ssl3.sha1));
- GUARD(s2n_hash_new(&conn->initial.signature_hash));
- GUARD(s2n_hash_new(&conn->secure.signature_hash));
-
- return 0;
-}
-
-static int s2n_connection_init_hashes(struct s2n_connection *conn)
-{
- /* Initialize all of the Connection's hash states */
-
- if (s2n_hash_is_available(S2N_HASH_MD5)) {
- /* Only initialize hashes that use MD5 if available. */
- GUARD(s2n_hash_init(&conn->prf_space.ssl3.md5, S2N_HASH_MD5));
- }
-
-
- /* Allow MD5 for hash states that are used by the PRF. This is required
- * to comply with the TLS 1.0 and 1.1 RFCs and is approved as per
- * NIST Special Publication 800-52 Revision 1.
- */
- if (s2n_is_in_fips_mode()) {
- GUARD(s2n_hash_allow_md5_for_fips(&conn->handshake.md5));
- GUARD(s2n_hash_allow_md5_for_fips(&conn->handshake.prf_md5_hash_copy));
-
- /* Do not check s2n_hash_is_available before initialization. Allow MD5 and
- * SHA-1 for both fips and non-fips mode. This is required to perform the
- * signature checks in the CertificateVerify message in TLS 1.0 and TLS 1.1.
- * This is approved per Nist SP 800-52r1.*/
- GUARD(s2n_hash_allow_md5_for_fips(&conn->handshake.md5_sha1));
- }
-
- GUARD(s2n_hash_init(&conn->handshake.md5, S2N_HASH_MD5));
- GUARD(s2n_hash_init(&conn->handshake.prf_md5_hash_copy, S2N_HASH_MD5));
- GUARD(s2n_hash_init(&conn->handshake.md5_sha1, S2N_HASH_MD5_SHA1));
-
- GUARD(s2n_hash_init(&conn->handshake.sha1, S2N_HASH_SHA1));
- GUARD(s2n_hash_init(&conn->handshake.sha224, S2N_HASH_SHA224));
- GUARD(s2n_hash_init(&conn->handshake.sha256, S2N_HASH_SHA256));
- GUARD(s2n_hash_init(&conn->handshake.sha384, S2N_HASH_SHA384));
- GUARD(s2n_hash_init(&conn->handshake.sha512, S2N_HASH_SHA512));
- GUARD(s2n_hash_init(&conn->handshake.ccv_hash_copy, S2N_HASH_NONE));
- GUARD(s2n_hash_init(&conn->handshake.prf_tls12_hash_copy, S2N_HASH_NONE));
- GUARD(s2n_hash_init(&conn->handshake.server_finished_copy, S2N_HASH_NONE));
- GUARD(s2n_hash_init(&conn->handshake.prf_sha1_hash_copy, S2N_HASH_SHA1));
- GUARD(s2n_hash_init(&conn->prf_space.ssl3.sha1, S2N_HASH_SHA1));
- GUARD(s2n_hash_init(&conn->initial.signature_hash, S2N_HASH_NONE));
- GUARD(s2n_hash_init(&conn->secure.signature_hash, S2N_HASH_NONE));
-
- return 0;
-}
-
static int s2n_connection_new_hmacs(struct s2n_connection *conn)
{
/* Allocate long-term memory for the Connection's HMAC states */
- GUARD(s2n_hmac_new(&conn->initial.client_record_mac));
- GUARD(s2n_hmac_new(&conn->initial.server_record_mac));
- GUARD(s2n_hmac_new(&conn->initial.record_mac_copy_workspace));
- GUARD(s2n_hmac_new(&conn->secure.client_record_mac));
- GUARD(s2n_hmac_new(&conn->secure.server_record_mac));
- GUARD(s2n_hmac_new(&conn->secure.record_mac_copy_workspace));
+ POSIX_GUARD(s2n_hmac_new(&conn->initial.client_record_mac));
+ POSIX_GUARD(s2n_hmac_new(&conn->initial.server_record_mac));
+ POSIX_GUARD(s2n_hmac_new(&conn->secure.client_record_mac));
+ POSIX_GUARD(s2n_hmac_new(&conn->secure.server_record_mac));
return 0;
}
@@ -139,107 +73,92 @@ static int s2n_connection_new_hmacs(struct s2n_connection *conn)
static int s2n_connection_init_hmacs(struct s2n_connection *conn)
{
/* Initialize all of the Connection's HMAC states */
- GUARD(s2n_hmac_init(&conn->initial.client_record_mac, S2N_HMAC_NONE, NULL, 0));
- GUARD(s2n_hmac_init(&conn->initial.server_record_mac, S2N_HMAC_NONE, NULL, 0));
- GUARD(s2n_hmac_init(&conn->initial.record_mac_copy_workspace, S2N_HMAC_NONE, NULL, 0));
- GUARD(s2n_hmac_init(&conn->secure.client_record_mac, S2N_HMAC_NONE, NULL, 0));
- GUARD(s2n_hmac_init(&conn->secure.server_record_mac, S2N_HMAC_NONE, NULL, 0));
- GUARD(s2n_hmac_init(&conn->secure.record_mac_copy_workspace, S2N_HMAC_NONE, NULL, 0));
+ POSIX_GUARD(s2n_hmac_init(&conn->initial.client_record_mac, S2N_HMAC_NONE, NULL, 0));
+ POSIX_GUARD(s2n_hmac_init(&conn->initial.server_record_mac, S2N_HMAC_NONE, NULL, 0));
+ POSIX_GUARD(s2n_hmac_init(&conn->secure.client_record_mac, S2N_HMAC_NONE, NULL, 0));
+ POSIX_GUARD(s2n_hmac_init(&conn->secure.server_record_mac, S2N_HMAC_NONE, NULL, 0));
return 0;
}
+/* Allocates and initializes memory for a new connection.
+ *
+ * Since customers can reuse a connection, ensure that values on the connection are
+ * initialized in `s2n_connection_wipe` where possible. */
struct s2n_connection *s2n_connection_new(s2n_mode mode)
{
struct s2n_blob blob = {0};
- GUARD_PTR(s2n_alloc(&blob, sizeof(struct s2n_connection)));
- GUARD_PTR(s2n_blob_zero(&blob));
+ PTR_GUARD_POSIX(s2n_alloc(&blob, sizeof(struct s2n_connection)));
+ PTR_GUARD_POSIX(s2n_blob_zero(&blob));
/* Cast 'through' void to acknowledge that we are changing alignment,
* which is ok, as blob.data is always aligned.
*/
struct s2n_connection* conn = (struct s2n_connection *)(void *)blob.data;
- GUARD_PTR(s2n_connection_set_config(conn, s2n_fetch_default_config()));
+ PTR_GUARD_POSIX(s2n_connection_set_config(conn, s2n_fetch_default_config()));
+ /* `mode` is initialized here since its passed in as a parameter. */
conn->mode = mode;
- conn->blinding = S2N_BUILT_IN_BLINDING;
- conn->close_notify_queued = 0;
- conn->client_session_resumed = 0;
- conn->session_id_len = 0;
- conn->verify_host_fn = NULL;
- conn->data_for_verify_host = NULL;
- conn->verify_host_fn_overridden = 0;
- conn->data_for_verify_host = NULL;
- conn->send = NULL;
- conn->recv = NULL;
- conn->send_io_context = NULL;
- conn->recv_io_context = NULL;
- conn->managed_io = 0;
- conn->corked_io = 0;
- conn->context = NULL;
- conn->security_policy_override = NULL;
- conn->ticket_lifetime_hint = 0;
- conn->session_ticket_status = S2N_NO_TICKET;
/* Allocate the fixed-size stuffers */
blob = (struct s2n_blob) {0};
- GUARD_PTR(s2n_blob_init(&blob, conn->alert_in_data, S2N_ALERT_LENGTH));
- GUARD_PTR(s2n_stuffer_init(&conn->alert_in, &blob));
+ PTR_GUARD_POSIX(s2n_blob_init(&blob, conn->alert_in_data, S2N_ALERT_LENGTH));
+ PTR_GUARD_POSIX(s2n_stuffer_init(&conn->alert_in, &blob));
blob = (struct s2n_blob) {0};
- GUARD_PTR(s2n_blob_init(&blob, conn->reader_alert_out_data, S2N_ALERT_LENGTH));
- GUARD_PTR(s2n_stuffer_init(&conn->reader_alert_out, &blob));
+ PTR_GUARD_POSIX(s2n_blob_init(&blob, conn->reader_alert_out_data, S2N_ALERT_LENGTH));
+ PTR_GUARD_POSIX(s2n_stuffer_init(&conn->reader_alert_out, &blob));
blob = (struct s2n_blob) {0};
- GUARD_PTR(s2n_blob_init(&blob, conn->writer_alert_out_data, S2N_ALERT_LENGTH));
- GUARD_PTR(s2n_stuffer_init(&conn->writer_alert_out, &blob));
+ PTR_GUARD_POSIX(s2n_blob_init(&blob, conn->writer_alert_out_data, S2N_ALERT_LENGTH));
+ PTR_GUARD_POSIX(s2n_stuffer_init(&conn->writer_alert_out, &blob));
blob = (struct s2n_blob) {0};
- GUARD_PTR(s2n_blob_init(&blob, conn->ticket_ext_data, S2N_TICKET_SIZE_IN_BYTES));
- GUARD_PTR(s2n_stuffer_init(&conn->client_ticket_to_decrypt, &blob));
+ PTR_GUARD_POSIX(s2n_blob_init(&blob, conn->ticket_ext_data, S2N_TLS12_TICKET_SIZE_IN_BYTES));
+ PTR_GUARD_POSIX(s2n_stuffer_init(&conn->client_ticket_to_decrypt, &blob));
/* Allocate long term key memory */
- GUARD_PTR(s2n_session_key_alloc(&conn->secure.client_key));
- GUARD_PTR(s2n_session_key_alloc(&conn->secure.server_key));
- GUARD_PTR(s2n_session_key_alloc(&conn->initial.client_key));
- GUARD_PTR(s2n_session_key_alloc(&conn->initial.server_key));
+ PTR_GUARD_POSIX(s2n_session_key_alloc(&conn->secure.client_key));
+ PTR_GUARD_POSIX(s2n_session_key_alloc(&conn->secure.server_key));
+ PTR_GUARD_POSIX(s2n_session_key_alloc(&conn->initial.client_key));
+ PTR_GUARD_POSIX(s2n_session_key_alloc(&conn->initial.server_key));
/* Allocate long term hash and HMAC memory */
- GUARD_PTR(s2n_prf_new(conn));
-
- GUARD_PTR(s2n_connection_new_hashes(conn));
- GUARD_PTR(s2n_connection_init_hashes(conn));
+ PTR_GUARD_RESULT(s2n_prf_new(conn));
+ PTR_GUARD_RESULT(s2n_handshake_hashes_new(&conn->handshake.hashes));
- GUARD_PTR(s2n_connection_new_hmacs(conn));
- GUARD_PTR(s2n_connection_init_hmacs(conn));
+ PTR_GUARD_POSIX(s2n_connection_new_hmacs(conn));
+ PTR_GUARD_POSIX(s2n_connection_init_hmacs(conn));
/* Initialize the growable stuffers. Zero length at first, but the resize
* in _wipe will fix that
*/
blob = (struct s2n_blob) {0};
- GUARD_PTR(s2n_blob_init(&blob, conn->header_in_data, S2N_TLS_RECORD_HEADER_LENGTH));
- GUARD_PTR(s2n_stuffer_init(&conn->header_in, &blob));
- GUARD_PTR(s2n_stuffer_growable_alloc(&conn->out, 0));
- GUARD_PTR(s2n_stuffer_growable_alloc(&conn->in, 0));
- GUARD_PTR(s2n_stuffer_growable_alloc(&conn->handshake.io, 0));
- GUARD_PTR(s2n_stuffer_growable_alloc(&conn->client_hello.raw_message, 0));
- GUARD_PTR(s2n_connection_wipe(conn));
- GUARD_RESULT_PTR(s2n_timer_start(conn->config, &conn->write_timer));
-
- /* Initialize the cookie stuffer with zero length. If a cookie extension
- * is received, the stuffer will be resized according to the cookie length */
- GUARD_PTR(s2n_stuffer_growable_alloc(&conn->cookie_stuffer, 0));
-
+ PTR_GUARD_POSIX(s2n_blob_init(&blob, conn->header_in_data, S2N_TLS_RECORD_HEADER_LENGTH));
+ PTR_GUARD_POSIX(s2n_stuffer_init(&conn->header_in, &blob));
+ PTR_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->out, 0));
+ PTR_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->in, 0));
+ PTR_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->handshake.io, 0));
+ PTR_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->client_hello.raw_message, 0));
+ PTR_GUARD_RESULT(s2n_timer_start(conn->config, &conn->write_timer));
+
+ /* NOTE: s2n_connection_wipe MUST be called last in this function.
+ *
+ * s2n_connection_wipe is used for initializing values but also used by customers to
+ * reset/reuse the connection. Calling it last ensures that s2n_connection_wipe is
+ * implemented correctly and safely.
+ */
+ PTR_GUARD_POSIX(s2n_connection_wipe(conn));
return conn;
}
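
/* Illustrative sketch (not part of the upstream diff): because
 * s2n_connection_wipe() both initializes and resets a connection, a server
 * can allocate once and reuse the same structure across many TLS sessions.
 * The accept/handshake/serve loop is elided. */
static int handle_clients(struct s2n_config *config)
{
    struct s2n_connection *conn = s2n_connection_new(S2N_SERVER);
    if (conn == NULL) { return -1; }

    for (int i = 0; i < 100; i++) {
        if (s2n_connection_set_config(conn, config) != S2N_SUCCESS) { break; }
        /* ... accept a socket, run the handshake, serve the request ... */

        /* Reset all per-session state before the next client */
        if (s2n_connection_wipe(conn) != S2N_SUCCESS) { break; }
    }
    return s2n_connection_free(conn);
}
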
static int s2n_connection_free_keys(struct s2n_connection *conn)
{
- GUARD(s2n_session_key_free(&conn->secure.client_key));
- GUARD(s2n_session_key_free(&conn->secure.server_key));
- GUARD(s2n_session_key_free(&conn->initial.client_key));
- GUARD(s2n_session_key_free(&conn->initial.server_key));
+ POSIX_GUARD(s2n_session_key_free(&conn->secure.client_key));
+ POSIX_GUARD(s2n_session_key_free(&conn->secure.server_key));
+ POSIX_GUARD(s2n_session_key_free(&conn->initial.client_key));
+ POSIX_GUARD(s2n_session_key_free(&conn->initial.server_key));
return 0;
}
@@ -247,35 +166,33 @@ static int s2n_connection_free_keys(struct s2n_connection *conn)
static int s2n_connection_zero(struct s2n_connection *conn, int mode, struct s2n_config *config)
{
/* Zero the whole connection structure */
- memset_check(conn, 0, sizeof(struct s2n_connection));
+ POSIX_CHECKED_MEMSET(conn, 0, sizeof(struct s2n_connection));
- conn->send = NULL;
- conn->recv = NULL;
- conn->send_io_context = NULL;
- conn->recv_io_context = NULL;
conn->mode = mode;
- conn->close_notify_queued = 0;
- conn->client_session_resumed = 0;
- conn->current_user_data_consumed = 0;
conn->initial.cipher_suite = &s2n_null_cipher_suite;
conn->secure.cipher_suite = &s2n_null_cipher_suite;
- conn->initial.kem_params.kem = NULL;
- conn->secure.kem_params.kem = NULL;
conn->server = &conn->initial;
conn->client = &conn->initial;
conn->max_outgoing_fragment_length = S2N_DEFAULT_FRAGMENT_LENGTH;
- conn->mfl_code = S2N_TLS_MAX_FRAG_LEN_EXT_NONE;
- conn->handshake.handshake_type = INITIAL;
- conn->handshake.message_number = 0;
- conn->handshake.paused = 0;
- conn->verify_host_fn = NULL;
- conn->verify_host_fn_overridden = 0;
- conn->data_for_verify_host = NULL;
+ conn->handshake.end_of_messages = APPLICATION_DATA;
s2n_connection_set_config(conn, config);
return 0;
}
+S2N_RESULT s2n_connection_wipe_all_keyshares(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+
+ RESULT_GUARD_POSIX(s2n_ecc_evp_params_free(&conn->kex_params.server_ecc_evp_params));
+ RESULT_GUARD_POSIX(s2n_ecc_evp_params_free(&conn->kex_params.client_ecc_evp_params));
+
+ RESULT_GUARD_POSIX(s2n_kem_group_free(&conn->kex_params.server_kem_group_params));
+ RESULT_GUARD_POSIX(s2n_kem_group_free(&conn->kex_params.client_kem_group_params));
+
+ return S2N_RESULT_OK;
+}
+
static int s2n_connection_wipe_keys(struct s2n_connection *conn)
{
/* Destroy any keys - we call destroy on the object as that is where
@@ -284,52 +201,22 @@ static int s2n_connection_wipe_keys(struct s2n_connection *conn)
&& conn->secure.cipher_suite->record_alg
&& conn->secure.cipher_suite->record_alg->cipher
&& conn->secure.cipher_suite->record_alg->cipher->destroy_key) {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->destroy_key(&conn->secure.client_key));
- GUARD(conn->secure.cipher_suite->record_alg->cipher->destroy_key(&conn->secure.server_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->destroy_key(&conn->secure.client_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->destroy_key(&conn->secure.server_key));
}
/* Free any server key received (we may not have completed a
* handshake, so this may not have been free'd yet) */
- GUARD(s2n_pkey_free(&conn->secure.server_public_key));
- GUARD(s2n_pkey_zero_init(&conn->secure.server_public_key));
- GUARD(s2n_pkey_free(&conn->secure.client_public_key));
- GUARD(s2n_pkey_zero_init(&conn->secure.client_public_key));
+ POSIX_GUARD(s2n_pkey_free(&conn->handshake_params.server_public_key));
+ POSIX_GUARD(s2n_pkey_zero_init(&conn->handshake_params.server_public_key));
+ POSIX_GUARD(s2n_pkey_free(&conn->handshake_params.client_public_key));
+ POSIX_GUARD(s2n_pkey_zero_init(&conn->handshake_params.client_public_key));
s2n_x509_validator_wipe(&conn->x509_validator);
- GUARD(s2n_dh_params_free(&conn->secure.server_dh_params));
- GUARD(s2n_ecc_evp_params_free(&conn->secure.server_ecc_evp_params));
- for (int i=0; i < S2N_ECC_EVP_SUPPORTED_CURVES_COUNT; i++) {
- GUARD(s2n_ecc_evp_params_free(&conn->secure.client_ecc_evp_params[i]));
- }
- GUARD(s2n_kem_group_free(&conn->secure.server_kem_group_params));
- for (int i = 0; i < S2N_SUPPORTED_KEM_GROUPS_COUNT; i++) {
- GUARD(s2n_kem_group_free(&conn->secure.client_kem_group_params[i]));
- }
- GUARD(s2n_kem_free(&conn->secure.kem_params));
- GUARD(s2n_free(&conn->secure.client_cert_chain));
- GUARD(s2n_free(&conn->ct_response));
-
- return 0;
-}
-
-static int s2n_connection_reset_hashes(struct s2n_connection *conn)
-{
- /* Reset all of the Connection's hash states */
- GUARD(s2n_hash_reset(&conn->handshake.md5));
- GUARD(s2n_hash_reset(&conn->handshake.sha1));
- GUARD(s2n_hash_reset(&conn->handshake.sha224));
- GUARD(s2n_hash_reset(&conn->handshake.sha256));
- GUARD(s2n_hash_reset(&conn->handshake.sha384));
- GUARD(s2n_hash_reset(&conn->handshake.sha512));
- GUARD(s2n_hash_reset(&conn->handshake.md5_sha1));
- GUARD(s2n_hash_reset(&conn->handshake.ccv_hash_copy));
- GUARD(s2n_hash_reset(&conn->handshake.prf_md5_hash_copy));
- GUARD(s2n_hash_reset(&conn->handshake.prf_sha1_hash_copy));
- GUARD(s2n_hash_reset(&conn->handshake.prf_tls12_hash_copy));
- GUARD(s2n_hash_reset(&conn->handshake.server_finished_copy));
- GUARD(s2n_hash_reset(&conn->prf_space.ssl3.md5));
- GUARD(s2n_hash_reset(&conn->prf_space.ssl3.sha1));
- GUARD(s2n_hash_reset(&conn->initial.signature_hash));
- GUARD(s2n_hash_reset(&conn->secure.signature_hash));
+ POSIX_GUARD(s2n_dh_params_free(&conn->kex_params.server_dh_params));
+ POSIX_GUARD_RESULT(s2n_connection_wipe_all_keyshares(conn));
+ POSIX_GUARD(s2n_kem_free(&conn->kex_params.kem_params));
+ POSIX_GUARD(s2n_free(&conn->handshake_params.client_cert_chain));
+ POSIX_GUARD(s2n_free(&conn->ct_response));
return 0;
}
@@ -337,66 +224,56 @@ static int s2n_connection_reset_hashes(struct s2n_connection *conn)
static int s2n_connection_reset_hmacs(struct s2n_connection *conn)
{
/* Reset all of the Connection's HMAC states */
- GUARD(s2n_hmac_reset(&conn->initial.client_record_mac));
- GUARD(s2n_hmac_reset(&conn->initial.server_record_mac));
- GUARD(s2n_hmac_reset(&conn->initial.record_mac_copy_workspace));
- GUARD(s2n_hmac_reset(&conn->secure.client_record_mac));
- GUARD(s2n_hmac_reset(&conn->secure.server_record_mac));
- GUARD(s2n_hmac_reset(&conn->secure.record_mac_copy_workspace));
+ POSIX_GUARD(s2n_hmac_reset(&conn->initial.client_record_mac));
+ POSIX_GUARD(s2n_hmac_reset(&conn->initial.server_record_mac));
+ POSIX_GUARD(s2n_hmac_reset(&conn->secure.client_record_mac));
+ POSIX_GUARD(s2n_hmac_reset(&conn->secure.server_record_mac));
return 0;
}
-static int s2n_connection_free_io_contexts(struct s2n_connection *conn)
+static int s2n_connection_free_managed_recv_io(struct s2n_connection *conn)
{
- /* Free the I/O context if it was allocated by s2n. Don't touch user-controlled contexts. */
- if (!conn->managed_io) {
- return 0;
+ POSIX_ENSURE_REF(conn);
+
+ if (conn->managed_recv_io) {
+ POSIX_GUARD(s2n_free_object((uint8_t **)&conn->recv_io_context, sizeof(struct s2n_socket_read_io_context)));
+ conn->managed_recv_io = false;
+ conn->recv = NULL;
}
+ return S2N_SUCCESS;
+}
+
+static int s2n_connection_free_managed_send_io(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
- GUARD(s2n_free_object((uint8_t **)&conn->send_io_context, sizeof(struct s2n_socket_write_io_context)));
- GUARD(s2n_free_object((uint8_t **)&conn->recv_io_context, sizeof(struct s2n_socket_read_io_context)));
+ if (conn->managed_send_io) {
+ POSIX_GUARD(s2n_free_object((uint8_t **)&conn->send_io_context, sizeof(struct s2n_socket_write_io_context)));
+ conn->managed_send_io = false;
+ conn->send = NULL;
+ }
+ return S2N_SUCCESS;
+}
- return 0;
+static int s2n_connection_free_managed_io(struct s2n_connection *conn)
+{
+ POSIX_GUARD(s2n_connection_free_managed_recv_io(conn));
+ POSIX_GUARD(s2n_connection_free_managed_send_io(conn));
+ return S2N_SUCCESS;
}
static int s2n_connection_wipe_io(struct s2n_connection *conn)
{
if (s2n_connection_is_managed_corked(conn) && conn->recv){
- GUARD(s2n_socket_read_restore(conn));
+ POSIX_GUARD(s2n_socket_read_restore(conn));
}
if (s2n_connection_is_managed_corked(conn) && conn->send){
- GUARD(s2n_socket_write_restore(conn));
+ POSIX_GUARD(s2n_socket_write_restore(conn));
}
/* Remove all I/O-related members */
- GUARD(s2n_connection_free_io_contexts(conn));
- conn->managed_io = 0;
- conn->send = NULL;
- conn->recv = NULL;
-
- return 0;
-}
-
-static int s2n_connection_free_hashes(struct s2n_connection *conn)
-{
- /* Free all of the Connection's hash states */
- GUARD(s2n_hash_free(&conn->handshake.md5));
- GUARD(s2n_hash_free(&conn->handshake.sha1));
- GUARD(s2n_hash_free(&conn->handshake.sha224));
- GUARD(s2n_hash_free(&conn->handshake.sha256));
- GUARD(s2n_hash_free(&conn->handshake.sha384));
- GUARD(s2n_hash_free(&conn->handshake.sha512));
- GUARD(s2n_hash_free(&conn->handshake.md5_sha1));
- GUARD(s2n_hash_free(&conn->handshake.ccv_hash_copy));
- GUARD(s2n_hash_free(&conn->handshake.prf_md5_hash_copy));
- GUARD(s2n_hash_free(&conn->handshake.prf_sha1_hash_copy));
- GUARD(s2n_hash_free(&conn->handshake.prf_tls12_hash_copy));
- GUARD(s2n_hash_free(&conn->handshake.server_finished_copy));
- GUARD(s2n_hash_free(&conn->prf_space.ssl3.md5));
- GUARD(s2n_hash_free(&conn->prf_space.ssl3.sha1));
- GUARD(s2n_hash_free(&conn->initial.signature_hash));
- GUARD(s2n_hash_free(&conn->secure.signature_hash));
+ POSIX_GUARD(s2n_connection_free_managed_io(conn));
return 0;
}
@@ -404,12 +281,10 @@ static int s2n_connection_free_hashes(struct s2n_connection *conn)
static int s2n_connection_free_hmacs(struct s2n_connection *conn)
{
/* Free all of the Connection's HMAC states */
- GUARD(s2n_hmac_free(&conn->initial.client_record_mac));
- GUARD(s2n_hmac_free(&conn->initial.server_record_mac));
- GUARD(s2n_hmac_free(&conn->initial.record_mac_copy_workspace));
- GUARD(s2n_hmac_free(&conn->secure.client_record_mac));
- GUARD(s2n_hmac_free(&conn->secure.server_record_mac));
- GUARD(s2n_hmac_free(&conn->secure.record_mac_copy_workspace));
+ POSIX_GUARD(s2n_hmac_free(&conn->initial.client_record_mac));
+ POSIX_GUARD(s2n_hmac_free(&conn->initial.server_record_mac));
+ POSIX_GUARD(s2n_hmac_free(&conn->secure.client_record_mac));
+ POSIX_GUARD(s2n_hmac_free(&conn->secure.server_record_mac));
return 0;
}
@@ -448,42 +323,50 @@ static uint8_t s2n_default_verify_host(const char *host_name, size_t len, void *
return 0;
}
-int s2n_connection_free(struct s2n_connection *conn)
+S2N_CLEANUP_RESULT s2n_connection_ptr_free(struct s2n_connection **conn)
{
- GUARD(s2n_connection_wipe_keys(conn));
- GUARD(s2n_connection_free_keys(conn));
- GUARD_AS_POSIX(s2n_psk_parameters_wipe(&conn->psk_params));
+ RESULT_ENSURE_REF(conn);
+ RESULT_GUARD_POSIX(s2n_connection_free(*conn));
+ *conn = NULL;
+ return S2N_RESULT_OK;
+}
- GUARD(s2n_prf_free(conn));
+int s2n_connection_free(struct s2n_connection *conn)
+{
+ POSIX_GUARD(s2n_connection_wipe_keys(conn));
+ POSIX_GUARD(s2n_connection_free_keys(conn));
+ POSIX_GUARD_RESULT(s2n_psk_parameters_wipe(&conn->psk_params));
- GUARD(s2n_connection_reset_hashes(conn));
- GUARD(s2n_connection_free_hashes(conn));
+ POSIX_GUARD_RESULT(s2n_prf_free(conn));
+ POSIX_GUARD_RESULT(s2n_handshake_hashes_free(&conn->handshake.hashes));
- GUARD(s2n_connection_reset_hmacs(conn));
- GUARD(s2n_connection_free_hmacs(conn));
+ POSIX_GUARD(s2n_connection_reset_hmacs(conn));
+ POSIX_GUARD(s2n_connection_free_hmacs(conn));
- GUARD(s2n_connection_free_io_contexts(conn));
+ POSIX_GUARD(s2n_connection_free_managed_io(conn));
- GUARD(s2n_free(&conn->client_ticket));
- GUARD(s2n_free(&conn->status_response));
- GUARD(s2n_free(&conn->our_quic_transport_parameters));
- GUARD(s2n_free(&conn->peer_quic_transport_parameters));
- GUARD(s2n_stuffer_free(&conn->in));
- GUARD(s2n_stuffer_free(&conn->out));
- GUARD(s2n_stuffer_free(&conn->handshake.io));
+ POSIX_GUARD(s2n_free(&conn->client_ticket));
+ POSIX_GUARD(s2n_free(&conn->status_response));
+ POSIX_GUARD(s2n_free(&conn->our_quic_transport_parameters));
+ POSIX_GUARD(s2n_free(&conn->peer_quic_transport_parameters));
+ POSIX_GUARD(s2n_free(&conn->server_early_data_context));
+ POSIX_GUARD(s2n_free(&conn->tls13_ticket_fields.session_secret));
+ POSIX_GUARD(s2n_stuffer_free(&conn->in));
+ POSIX_GUARD(s2n_stuffer_free(&conn->out));
+ POSIX_GUARD(s2n_stuffer_free(&conn->handshake.io));
s2n_x509_validator_wipe(&conn->x509_validator);
- GUARD(s2n_client_hello_free(&conn->client_hello));
- GUARD(s2n_free(&conn->application_protocols_overridden));
- GUARD(s2n_stuffer_free(&conn->cookie_stuffer));
- GUARD(s2n_free_object((uint8_t **)&conn, sizeof(struct s2n_connection)));
+ POSIX_GUARD(s2n_client_hello_free(&conn->client_hello));
+ POSIX_GUARD(s2n_free(&conn->application_protocols_overridden));
+ POSIX_GUARD(s2n_stuffer_free(&conn->cookie_stuffer));
+ POSIX_GUARD(s2n_free_object((uint8_t **)&conn, sizeof(struct s2n_connection)));
return 0;
}
int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *config)
{
- notnull_check(conn);
- notnull_check(config);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(config);
if (conn->config == config) {
return 0;
@@ -491,7 +374,7 @@ int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *co
/* We only support one client certificate */
if (s2n_config_get_num_default_certs(config) > 1 && conn->mode == S2N_CLIENT) {
- S2N_ERROR(S2N_ERR_TOO_MANY_CERTIFICATES);
+ POSIX_BAIL(S2N_ERR_TOO_MANY_CERTIFICATES);
}
s2n_x509_validator_wipe(&conn->x509_validator);
@@ -505,10 +388,10 @@ int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *co
int8_t dont_need_x509_validation = (conn->mode == S2N_SERVER) && (auth_type == S2N_CERT_AUTH_NONE);
if (config->disable_x509_validation || dont_need_x509_validation) {
- GUARD(s2n_x509_validator_init_no_x509_validation(&conn->x509_validator));
+ POSIX_GUARD(s2n_x509_validator_init_no_x509_validation(&conn->x509_validator));
}
else {
- GUARD(s2n_x509_validator_init(&conn->x509_validator, &config->trust_store, config->check_ocsp));
+ POSIX_GUARD(s2n_x509_validator_init(&conn->x509_validator, &config->trust_store, config->check_ocsp));
if (!conn->verify_host_fn_overridden) {
if (config->verify_host != NULL) {
conn->verify_host_fn = config->verify_host;
@@ -520,18 +403,58 @@ int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *co
}
if (config->max_verify_cert_chain_depth_set) {
- GUARD(s2n_x509_validator_set_max_chain_depth(&conn->x509_validator, config->max_verify_cert_chain_depth));
+ POSIX_GUARD(s2n_x509_validator_set_max_chain_depth(&conn->x509_validator, config->max_verify_cert_chain_depth));
}
}
+ conn->tickets_to_send = config->initial_tickets_to_send;
+
+ if (conn->psk_params.psk_list.len == 0 && !conn->psk_mode_overridden) {
+ POSIX_GUARD(s2n_connection_set_psk_mode(conn, config->psk_mode));
+ conn->psk_mode_overridden = false;
+ }
+
+ /* If at least one certificate does not have a private key configured,
+ * the config must provide an async pkey callback.
+ * The handshake could still fail if the callback doesn't offload the
+ * signature, but this at least catches configuration mistakes.
+ */
+ if (config->no_signing_key) {
+ POSIX_ENSURE(config->async_pkey_cb, S2N_ERR_NO_PRIVATE_KEY);
+ }
+
+ if (config->quic_enabled) {
+ /* If QUIC is ever enabled for a connection via the config,
+ * we should enforce that it can never be disabled by
+ * changing the config.
+ *
+ * Enabling QUIC indicates that the connection is being used by
+ * a QUIC implementation, which never changes. Disabling QUIC
+         * partway through a connection could also potentially be
+ * dangerous, as QUIC handles encryption.
+ */
+ POSIX_GUARD(s2n_connection_enable_quic(conn));
+ }
conn->config = config;
- return 0;
+ return S2N_SUCCESS;
+}
+
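
/* Illustrative sketch (not part of the upstream diff): when a certificate is
 * added without its private key (config->no_signing_key), the config must
 * register an async private-key callback before a connection will accept it.
 * The callback body is schematic and its parameter list is assumed from the
 * s2n_async_pkey_fn typedef; a real implementation hands the operation off
 * to an external signer. */
static int offload_pkey_op(struct s2n_connection *conn, struct s2n_async_pkey_op *op)
{
    /* Stash `op` and complete it later from the application's signer */
    (void) conn;
    (void) op;
    return S2N_SUCCESS;
}

static int use_external_signer(struct s2n_config *config, struct s2n_connection *conn)
{
    if (s2n_config_set_async_pkey_callback(config, offload_pkey_op) != S2N_SUCCESS) {
        return -1;
    }
    /* Without the callback above, this would fail with S2N_ERR_NO_PRIVATE_KEY */
    return s2n_connection_set_config(conn, config);
}
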
+int s2n_connection_server_name_extension_used(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE(conn->mode == S2N_SERVER, S2N_ERR_INVALID_STATE);
+ POSIX_ENSURE(!(conn->handshake.client_hello_received), S2N_ERR_INVALID_STATE);
+
+ conn->server_name_used = 1;
+ return S2N_SUCCESS;
}
int s2n_connection_set_ctx(struct s2n_connection *conn, void *ctx)
{
+ POSIX_ENSURE_REF(conn);
+
conn->context = ctx;
- return 0;
+ return S2N_SUCCESS;
}
void *s2n_connection_get_ctx(struct s2n_connection *conn)
@@ -541,55 +464,51 @@ void *s2n_connection_get_ctx(struct s2n_connection *conn)
int s2n_connection_release_buffers(struct s2n_connection *conn)
{
- notnull_check(conn);
- PRECONDITION_POSIX(s2n_stuffer_validate(&conn->out));
- PRECONDITION_POSIX(s2n_stuffer_validate(&conn->in));
+ POSIX_ENSURE_REF(conn);
+ POSIX_PRECONDITION(s2n_stuffer_validate(&conn->out));
+ POSIX_PRECONDITION(s2n_stuffer_validate(&conn->in));
- ENSURE_POSIX(s2n_stuffer_is_consumed(&conn->out), S2N_ERR_STUFFER_HAS_UNPROCESSED_DATA);
- GUARD(s2n_stuffer_resize(&conn->out, 0));
+ POSIX_ENSURE(s2n_stuffer_is_consumed(&conn->out), S2N_ERR_STUFFER_HAS_UNPROCESSED_DATA);
+ POSIX_GUARD(s2n_stuffer_resize(&conn->out, 0));
- ENSURE_POSIX(s2n_stuffer_is_consumed(&conn->in), S2N_ERR_STUFFER_HAS_UNPROCESSED_DATA);
- GUARD(s2n_stuffer_resize(&conn->in, 0));
+ POSIX_ENSURE(s2n_stuffer_is_consumed(&conn->in), S2N_ERR_STUFFER_HAS_UNPROCESSED_DATA);
+ POSIX_GUARD(s2n_stuffer_resize(&conn->in, 0));
- POSTCONDITION_POSIX(s2n_stuffer_validate(&conn->out));
- POSTCONDITION_POSIX(s2n_stuffer_validate(&conn->in));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(&conn->out));
+ POSIX_POSTCONDITION(s2n_stuffer_validate(&conn->in));
return S2N_SUCCESS;
}
int s2n_connection_free_handshake(struct s2n_connection *conn)
{
/* We are done with the handshake */
- GUARD(s2n_hash_reset(&conn->handshake.md5));
- GUARD(s2n_hash_reset(&conn->handshake.sha1));
- GUARD(s2n_hash_reset(&conn->handshake.sha224));
- GUARD(s2n_hash_reset(&conn->handshake.sha256));
- GUARD(s2n_hash_reset(&conn->handshake.sha384));
- GUARD(s2n_hash_reset(&conn->handshake.sha512));
- GUARD(s2n_hash_reset(&conn->handshake.md5_sha1));
- GUARD(s2n_hash_reset(&conn->handshake.ccv_hash_copy));
- GUARD(s2n_hash_reset(&conn->handshake.prf_md5_hash_copy));
- GUARD(s2n_hash_reset(&conn->handshake.prf_sha1_hash_copy));
- GUARD(s2n_hash_reset(&conn->handshake.prf_tls12_hash_copy));
- GUARD(s2n_hash_reset(&conn->handshake.server_finished_copy));
+ POSIX_GUARD_RESULT(s2n_handshake_hashes_free(&conn->handshake.hashes));
+ POSIX_GUARD_RESULT(s2n_prf_free(conn));
/* Wipe the buffers we are going to free */
- GUARD(s2n_stuffer_wipe(&conn->handshake.io));
- GUARD(s2n_stuffer_wipe(&conn->client_hello.raw_message));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->handshake.io));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->client_hello.raw_message));
/* Truncate buffers to save memory, we are done with the handshake */
- GUARD(s2n_stuffer_resize(&conn->handshake.io, 0));
- GUARD(s2n_stuffer_resize(&conn->client_hello.raw_message, 0));
+ POSIX_GUARD(s2n_stuffer_resize(&conn->handshake.io, 0));
+ POSIX_GUARD(s2n_stuffer_resize(&conn->client_hello.raw_message, 0));
/* We can free extension data we no longer need */
- GUARD(s2n_free(&conn->client_ticket));
- GUARD(s2n_free(&conn->status_response));
- GUARD(s2n_free(&conn->our_quic_transport_parameters));
- GUARD(s2n_free(&conn->application_protocols_overridden));
- GUARD(s2n_stuffer_free(&conn->cookie_stuffer));
+ POSIX_GUARD(s2n_free(&conn->client_ticket));
+ POSIX_GUARD(s2n_free(&conn->status_response));
+ POSIX_GUARD(s2n_free(&conn->our_quic_transport_parameters));
+ POSIX_GUARD(s2n_free(&conn->application_protocols_overridden));
+ POSIX_GUARD(s2n_stuffer_free(&conn->cookie_stuffer));
return 0;
}
+/* An idempotent operation which initializes values on the connection.
+ *
+ * Called in order to reuse a connection structure for a new connection. Should wipe
+ * any persistent memory, free any temporary memory, and set all fields back to their
+ * defaults.
+ */
int s2n_connection_wipe(struct s2n_connection *conn)
{
/* First make a copy of everything we'd like to save, which isn't very much. */
@@ -609,43 +528,59 @@ int s2n_connection_wipe(struct s2n_connection *conn)
struct s2n_session_key initial_server_key = {0};
struct s2n_session_key secure_client_key = {0};
struct s2n_session_key secure_server_key = {0};
- /* Parts of the PRF working space, hash states, and hmac states will be wiped. Preserve structs to avoid reallocation */
- struct s2n_connection_prf_handles prf_handles = {0};
- struct s2n_connection_hash_handles hash_handles = {0};
+ /* Parts of the hmac states will be wiped. Preserve structs to avoid reallocation */
struct s2n_connection_hmac_handles hmac_handles = {0};
+ /* Some required structures might have been freed to conserve memory between handshakes.
+ * Restore them.
+ */
+ if (!conn->handshake.hashes) {
+ POSIX_GUARD_RESULT(s2n_handshake_hashes_new(&conn->handshake.hashes));
+ }
+ POSIX_GUARD_RESULT(s2n_handshake_hashes_wipe(conn->handshake.hashes));
+ struct s2n_handshake_hashes *handshake_hashes = conn->handshake.hashes;
+ if (!conn->prf_space) {
+ POSIX_GUARD_RESULT(s2n_prf_new(conn));
+ }
+ POSIX_GUARD_RESULT(s2n_prf_wipe(conn));
+ struct s2n_prf_working_space *prf_workspace = conn->prf_space;
+
/* Wipe all of the sensitive stuff */
- GUARD(s2n_connection_wipe_keys(conn));
- GUARD(s2n_connection_reset_hashes(conn));
- GUARD(s2n_connection_reset_hmacs(conn));
- GUARD(s2n_stuffer_wipe(&conn->alert_in));
- GUARD(s2n_stuffer_wipe(&conn->reader_alert_out));
- GUARD(s2n_stuffer_wipe(&conn->writer_alert_out));
- GUARD(s2n_stuffer_wipe(&conn->client_ticket_to_decrypt));
- GUARD(s2n_stuffer_wipe(&conn->handshake.io));
- GUARD(s2n_stuffer_wipe(&conn->client_hello.raw_message));
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
- GUARD(s2n_stuffer_wipe(&conn->out));
-
- GUARD_AS_POSIX(s2n_psk_parameters_wipe(&conn->psk_params));
+ POSIX_GUARD(s2n_connection_wipe_keys(conn));
+ POSIX_GUARD(s2n_connection_reset_hmacs(conn));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->alert_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->reader_alert_out));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->writer_alert_out));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->client_ticket_to_decrypt));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->handshake.io));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->client_hello.raw_message));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->out));
+
+ POSIX_GUARD_RESULT(s2n_psk_parameters_wipe(&conn->psk_params));
/* Wipe the I/O-related info and restore the original socket if necessary */
- GUARD(s2n_connection_wipe_io(conn));
-
- GUARD(s2n_free(&conn->client_ticket));
- GUARD(s2n_free(&conn->status_response));
- GUARD(s2n_free(&conn->application_protocols_overridden));
- GUARD(s2n_free(&conn->our_quic_transport_parameters));
- GUARD(s2n_free(&conn->peer_quic_transport_parameters));
+ POSIX_GUARD(s2n_connection_wipe_io(conn));
+
+ POSIX_GUARD(s2n_free(&conn->client_ticket));
+ POSIX_GUARD(s2n_free(&conn->status_response));
+ POSIX_GUARD(s2n_free(&conn->application_protocols_overridden));
+ POSIX_GUARD(s2n_free(&conn->our_quic_transport_parameters));
+ POSIX_GUARD(s2n_free(&conn->peer_quic_transport_parameters));
+ POSIX_GUARD(s2n_free(&conn->server_early_data_context));
+ POSIX_GUARD(s2n_free(&conn->tls13_ticket_fields.session_secret));
+ /* TODO: Simplify cookie_stuffer implementation.
+ * https://github.com/aws/s2n-tls/issues/3287 */
+ POSIX_GUARD(s2n_stuffer_free(&conn->cookie_stuffer));
/* Allocate memory for handling handshakes */
- GUARD(s2n_stuffer_resize(&conn->handshake.io, S2N_LARGE_RECORD_LENGTH));
+ POSIX_GUARD(s2n_stuffer_resize(&conn->handshake.io, S2N_LARGE_RECORD_LENGTH));
/* Truncate the message buffers to save memory, we will dynamically resize it as needed */
- GUARD(s2n_stuffer_resize(&conn->client_hello.raw_message, 0));
- GUARD(s2n_stuffer_resize(&conn->in, 0));
- GUARD(s2n_stuffer_resize(&conn->out, 0));
+ POSIX_GUARD(s2n_stuffer_resize(&conn->client_hello.raw_message, 0));
+ POSIX_GUARD(s2n_stuffer_resize(&conn->in, 0));
+ POSIX_GUARD(s2n_stuffer_resize(&conn->out, 0));
/* Remove context associated with connection */
conn->context = NULL;
@@ -660,53 +595,51 @@ int s2n_connection_wipe(struct s2n_connection *conn)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress"
#endif
- memcpy_check(&alert_in, &conn->alert_in, sizeof(struct s2n_stuffer));
- memcpy_check(&reader_alert_out, &conn->reader_alert_out, sizeof(struct s2n_stuffer));
- memcpy_check(&writer_alert_out, &conn->writer_alert_out, sizeof(struct s2n_stuffer));
- memcpy_check(&client_ticket_to_decrypt, &conn->client_ticket_to_decrypt, sizeof(struct s2n_stuffer));
- memcpy_check(&handshake_io, &conn->handshake.io, sizeof(struct s2n_stuffer));
- memcpy_check(&client_hello_raw_message, &conn->client_hello.raw_message, sizeof(struct s2n_stuffer));
- memcpy_check(&header_in, &conn->header_in, sizeof(struct s2n_stuffer));
- memcpy_check(&in, &conn->in, sizeof(struct s2n_stuffer));
- memcpy_check(&out, &conn->out, sizeof(struct s2n_stuffer));
- memcpy_check(&initial_client_key, &conn->initial.client_key, sizeof(struct s2n_session_key));
- memcpy_check(&initial_server_key, &conn->initial.server_key, sizeof(struct s2n_session_key));
- memcpy_check(&secure_client_key, &conn->secure.client_key, sizeof(struct s2n_session_key));
- memcpy_check(&secure_server_key, &conn->secure.server_key, sizeof(struct s2n_session_key));
- GUARD(s2n_connection_save_prf_state(&prf_handles, conn));
- GUARD(s2n_connection_save_hash_state(&hash_handles, conn));
- GUARD(s2n_connection_save_hmac_state(&hmac_handles, conn));
+ POSIX_CHECKED_MEMCPY(&alert_in, &conn->alert_in, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&reader_alert_out, &conn->reader_alert_out, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&writer_alert_out, &conn->writer_alert_out, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&client_ticket_to_decrypt, &conn->client_ticket_to_decrypt, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&handshake_io, &conn->handshake.io, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&client_hello_raw_message, &conn->client_hello.raw_message, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&header_in, &conn->header_in, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&in, &conn->in, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&out, &conn->out, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&initial_client_key, &conn->initial.client_key, sizeof(struct s2n_session_key));
+ POSIX_CHECKED_MEMCPY(&initial_server_key, &conn->initial.server_key, sizeof(struct s2n_session_key));
+ POSIX_CHECKED_MEMCPY(&secure_client_key, &conn->secure.client_key, sizeof(struct s2n_session_key));
+ POSIX_CHECKED_MEMCPY(&secure_server_key, &conn->secure.server_key, sizeof(struct s2n_session_key));
+ POSIX_GUARD(s2n_connection_save_hmac_state(&hmac_handles, conn));
#if S2N_GCC_VERSION_AT_LEAST(4,6,0)
#pragma GCC diagnostic pop
#endif
- GUARD(s2n_connection_zero(conn, mode, config));
-
- memcpy_check(&conn->alert_in, &alert_in, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->reader_alert_out, &reader_alert_out, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->writer_alert_out, &writer_alert_out, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->client_ticket_to_decrypt, &client_ticket_to_decrypt, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->handshake.io, &handshake_io, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->client_hello.raw_message, &client_hello_raw_message, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->header_in, &header_in, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->in, &in, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->out, &out, sizeof(struct s2n_stuffer));
- memcpy_check(&conn->initial.client_key, &initial_client_key, sizeof(struct s2n_session_key));
- memcpy_check(&conn->initial.server_key, &initial_server_key, sizeof(struct s2n_session_key));
- memcpy_check(&conn->secure.client_key, &secure_client_key, sizeof(struct s2n_session_key));
- memcpy_check(&conn->secure.server_key, &secure_server_key, sizeof(struct s2n_session_key));
- GUARD(s2n_connection_restore_prf_state(conn, &prf_handles));
- GUARD(s2n_connection_restore_hash_state(conn, &hash_handles));
- GUARD(s2n_connection_restore_hmac_state(conn, &hmac_handles));
+ POSIX_GUARD(s2n_connection_zero(conn, mode, config));
+
+ POSIX_CHECKED_MEMCPY(&conn->alert_in, &alert_in, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->reader_alert_out, &reader_alert_out, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->writer_alert_out, &writer_alert_out, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->client_ticket_to_decrypt, &client_ticket_to_decrypt, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->handshake.io, &handshake_io, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->client_hello.raw_message, &client_hello_raw_message, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->header_in, &header_in, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->in, &in, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->out, &out, sizeof(struct s2n_stuffer));
+ POSIX_CHECKED_MEMCPY(&conn->initial.client_key, &initial_client_key, sizeof(struct s2n_session_key));
+ POSIX_CHECKED_MEMCPY(&conn->initial.server_key, &initial_server_key, sizeof(struct s2n_session_key));
+ POSIX_CHECKED_MEMCPY(&conn->secure.client_key, &secure_client_key, sizeof(struct s2n_session_key));
+ POSIX_CHECKED_MEMCPY(&conn->secure.server_key, &secure_server_key, sizeof(struct s2n_session_key));
+ POSIX_GUARD(s2n_connection_restore_hmac_state(conn, &hmac_handles));
+ conn->handshake.hashes = handshake_hashes;
+ conn->prf_space = prf_workspace;
/* Re-initialize hash and hmac states */
- GUARD(s2n_connection_init_hashes(conn));
- GUARD(s2n_connection_init_hmacs(conn));
+ POSIX_GUARD(s2n_connection_init_hmacs(conn));
- GUARD_AS_POSIX(s2n_psk_parameters_init(&conn->psk_params));
+ POSIX_GUARD_RESULT(s2n_psk_parameters_init(&conn->psk_params));
+ conn->server_keying_material_lifetime = ONE_WEEK_IN_SEC;
    /* Require all handshake hashes. This set can be reduced as the handshake progresses. */
- GUARD(s2n_handshake_require_all_hashes(&conn->handshake));
+ POSIX_GUARD(s2n_handshake_require_all_hashes(&conn->handshake));
if (conn->mode == S2N_SERVER) {
/* Start with the highest protocol version so that the highest common protocol version can be selected */
@@ -723,142 +656,157 @@ int s2n_connection_wipe(struct s2n_connection *conn)
conn->actual_protocol_version = s2n_highest_protocol_version;
}
+ /* Initialize remaining values */
+ conn->blinding = S2N_BUILT_IN_BLINDING;
+ conn->session_ticket_status = S2N_NO_TICKET;
+ /* Initialize the cookie stuffer with zero length. If a cookie extension
+ * is received, the stuffer will be resized according to the cookie length */
+ POSIX_GUARD(s2n_stuffer_growable_alloc(&conn->cookie_stuffer, 0));
+
return 0;
}
int s2n_connection_set_recv_ctx(struct s2n_connection *conn, void *ctx)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD(s2n_connection_free_managed_recv_io(conn));
conn->recv_io_context = ctx;
- return 0;
+ return S2N_SUCCESS;
}
int s2n_connection_set_send_ctx(struct s2n_connection *conn, void *ctx)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD(s2n_connection_free_managed_send_io(conn));
conn->send_io_context = ctx;
- return 0;
+ return S2N_SUCCESS;
}
int s2n_connection_set_recv_cb(struct s2n_connection *conn, s2n_recv_fn recv)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD(s2n_connection_free_managed_recv_io(conn));
conn->recv = recv;
- return 0;
+ return S2N_SUCCESS;
}
int s2n_connection_set_send_cb(struct s2n_connection *conn, s2n_send_fn send)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD(s2n_connection_free_managed_send_io(conn));
conn->send = send;
- return 0;
+ return S2N_SUCCESS;
}
int s2n_connection_get_client_cert_chain(struct s2n_connection *conn, uint8_t **der_cert_chain_out, uint32_t *cert_chain_len)
{
- notnull_check(conn);
- notnull_check(der_cert_chain_out);
- notnull_check(cert_chain_len);
- notnull_check(conn->secure.client_cert_chain.data);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(der_cert_chain_out);
+ POSIX_ENSURE_REF(cert_chain_len);
+ POSIX_ENSURE_REF(conn->handshake_params.client_cert_chain.data);
- *der_cert_chain_out = conn->secure.client_cert_chain.data;
- *cert_chain_len = conn->secure.client_cert_chain.size;
+ *der_cert_chain_out = conn->handshake_params.client_cert_chain.data;
+ *cert_chain_len = conn->handshake_params.client_cert_chain.size;
return 0;
}
int s2n_connection_get_cipher_preferences(struct s2n_connection *conn, const struct s2n_cipher_preferences **cipher_preferences)
{
- notnull_check(conn);
- notnull_check(conn->config);
- notnull_check(cipher_preferences);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->config);
+ POSIX_ENSURE_REF(cipher_preferences);
if (conn->security_policy_override != NULL) {
*cipher_preferences = conn->security_policy_override->cipher_preferences;
} else if (conn->config->security_policy != NULL) {
*cipher_preferences = conn->config->security_policy->cipher_preferences;
} else {
- S2N_ERROR(S2N_ERR_INVALID_CIPHER_PREFERENCES);
+ POSIX_BAIL(S2N_ERR_INVALID_CIPHER_PREFERENCES);
}
- notnull_check(*cipher_preferences);
+ POSIX_ENSURE_REF(*cipher_preferences);
return 0;
}
int s2n_connection_get_security_policy(struct s2n_connection *conn, const struct s2n_security_policy **security_policy)
{
- notnull_check(conn);
- notnull_check(conn->config);
- notnull_check(security_policy);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->config);
+ POSIX_ENSURE_REF(security_policy);
if (conn->security_policy_override != NULL) {
*security_policy = conn->security_policy_override;
} else if (conn->config->security_policy != NULL) {
*security_policy = conn->config->security_policy;
} else {
- S2N_ERROR(S2N_ERR_INVALID_SECURITY_POLICY);
+ POSIX_BAIL(S2N_ERR_INVALID_SECURITY_POLICY);
}
- notnull_check(*security_policy);
+ POSIX_ENSURE_REF(*security_policy);
return 0;
}
int s2n_connection_get_kem_preferences(struct s2n_connection *conn, const struct s2n_kem_preferences **kem_preferences)
{
- notnull_check(conn);
- notnull_check(conn->config);
- notnull_check(kem_preferences);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->config);
+ POSIX_ENSURE_REF(kem_preferences);
if (conn->security_policy_override != NULL) {
*kem_preferences = conn->security_policy_override->kem_preferences;
} else if (conn->config->security_policy != NULL) {
*kem_preferences = conn->config->security_policy->kem_preferences;
} else {
- S2N_ERROR(S2N_ERR_INVALID_KEM_PREFERENCES);
+ POSIX_BAIL(S2N_ERR_INVALID_KEM_PREFERENCES);
}
- notnull_check(*kem_preferences);
+ POSIX_ENSURE_REF(*kem_preferences);
return 0;
}
int s2n_connection_get_signature_preferences(struct s2n_connection *conn, const struct s2n_signature_preferences **signature_preferences)
{
- notnull_check(conn);
- notnull_check(conn->config);
- notnull_check(signature_preferences);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->config);
+ POSIX_ENSURE_REF(signature_preferences);
if (conn->security_policy_override != NULL) {
*signature_preferences = conn->security_policy_override->signature_preferences;
} else if (conn->config->security_policy != NULL) {
*signature_preferences = conn->config->security_policy->signature_preferences;
} else {
- S2N_ERROR(S2N_ERR_INVALID_SIGNATURE_ALGORITHMS_PREFERENCES);
+ POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHMS_PREFERENCES);
}
- notnull_check(*signature_preferences);
+ POSIX_ENSURE_REF(*signature_preferences);
return 0;
}
int s2n_connection_get_ecc_preferences(struct s2n_connection *conn, const struct s2n_ecc_preferences **ecc_preferences)
{
- notnull_check(conn);
- notnull_check(conn->config);
- notnull_check(ecc_preferences);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->config);
+ POSIX_ENSURE_REF(ecc_preferences);
if (conn->security_policy_override != NULL) {
*ecc_preferences = conn->security_policy_override->ecc_preferences;
} else if (conn->config->security_policy != NULL) {
*ecc_preferences = conn->config->security_policy->ecc_preferences;
} else {
- S2N_ERROR(S2N_ERR_INVALID_ECC_PREFERENCES);
+ POSIX_BAIL(S2N_ERR_INVALID_ECC_PREFERENCES);
}
- notnull_check(*ecc_preferences);
+ POSIX_ENSURE_REF(*ecc_preferences);
return 0;
}
int s2n_connection_get_protocol_preferences(struct s2n_connection *conn, struct s2n_blob **protocol_preferences)
{
- notnull_check(conn);
- notnull_check(protocol_preferences);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(protocol_preferences);
*protocol_preferences = NULL;
if (conn->application_protocols_overridden.size > 0) {
@@ -867,18 +815,19 @@ int s2n_connection_get_protocol_preferences(struct s2n_connection *conn, struct
*protocol_preferences = &conn->config->application_protocols;
}
- notnull_check(*protocol_preferences);
+ POSIX_ENSURE_REF(*protocol_preferences);
return 0;
}
int s2n_connection_get_client_auth_type(struct s2n_connection *conn, s2n_cert_auth_type *client_cert_auth_type)
{
- notnull_check(conn);
- notnull_check(client_cert_auth_type);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(client_cert_auth_type);
if (conn->client_cert_auth_type_overridden) {
*client_cert_auth_type = conn->client_cert_auth_type;
} else {
+ POSIX_ENSURE_REF(conn->config);
*client_cert_auth_type = conn->config->client_cert_auth_type;
}
@@ -897,42 +846,55 @@ int s2n_connection_set_read_fd(struct s2n_connection *conn, int rfd)
struct s2n_blob ctx_mem = {0};
struct s2n_socket_read_io_context *peer_socket_ctx;
- GUARD(s2n_alloc(&ctx_mem, sizeof(struct s2n_socket_read_io_context)));
- GUARD(s2n_blob_zero(&ctx_mem));
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD(s2n_alloc(&ctx_mem, sizeof(struct s2n_socket_read_io_context)));
+ POSIX_GUARD(s2n_blob_zero(&ctx_mem));
peer_socket_ctx = (struct s2n_socket_read_io_context *)(void *)ctx_mem.data;
peer_socket_ctx->fd = rfd;
- s2n_connection_set_recv_cb(conn, s2n_socket_read);
- s2n_connection_set_recv_ctx(conn, peer_socket_ctx);
- conn->managed_io = 1;
+ POSIX_GUARD(s2n_connection_set_recv_cb(conn, s2n_socket_read));
+ POSIX_GUARD(s2n_connection_set_recv_ctx(conn, peer_socket_ctx));
+ conn->managed_recv_io = true;
/* This is only needed if the user is using corked io.
* Take the snapshot in case optimized io is enabled after setting the fd.
*/
- GUARD(s2n_socket_read_snapshot(conn));
+ POSIX_GUARD(s2n_socket_read_snapshot(conn));
return 0;
}
+int s2n_connection_get_read_fd(struct s2n_connection *conn, int *readfd)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(readfd);
+ POSIX_ENSURE((conn->managed_recv_io && conn->recv_io_context), S2N_ERR_INVALID_STATE);
+
+ const struct s2n_socket_read_io_context *peer_socket_ctx = conn->recv_io_context;
+ *readfd = peer_socket_ctx->fd;
+ return S2N_SUCCESS;
+}
+
int s2n_connection_set_write_fd(struct s2n_connection *conn, int wfd)
{
struct s2n_blob ctx_mem = {0};
struct s2n_socket_write_io_context *peer_socket_ctx;
- GUARD(s2n_alloc(&ctx_mem, sizeof(struct s2n_socket_write_io_context)));
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD(s2n_alloc(&ctx_mem, sizeof(struct s2n_socket_write_io_context)));
peer_socket_ctx = (struct s2n_socket_write_io_context *)(void *)ctx_mem.data;
peer_socket_ctx->fd = wfd;
- s2n_connection_set_send_cb(conn, s2n_socket_write);
- s2n_connection_set_send_ctx(conn, peer_socket_ctx);
- conn->managed_io = 1;
+ POSIX_GUARD(s2n_connection_set_send_cb(conn, s2n_socket_write));
+ POSIX_GUARD(s2n_connection_set_send_ctx(conn, peer_socket_ctx));
+ conn->managed_send_io = true;
/* This is only needed if the user is using corked io.
* Take the snapshot in case optimized io is enabled after setting the fd.
*/
- GUARD(s2n_socket_write_snapshot(conn));
+ POSIX_GUARD(s2n_socket_write_snapshot(conn));
uint8_t ipv6;
if (0 == s2n_socket_is_ipv6(wfd, &ipv6)) {
@@ -944,19 +906,29 @@ int s2n_connection_set_write_fd(struct s2n_connection *conn, int wfd)
return 0;
}
+int s2n_connection_get_write_fd(struct s2n_connection *conn, int *writefd)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(writefd);
+ POSIX_ENSURE((conn->managed_send_io && conn->send_io_context), S2N_ERR_INVALID_STATE);
+
+ const struct s2n_socket_write_io_context *peer_socket_ctx = conn->send_io_context;
+ *writefd = peer_socket_ctx->fd;
+ return S2N_SUCCESS;
+}
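
A minimal usage sketch for the new fd getters (not part of this change; error handling trimmed). Both calls only succeed for s2n-managed socket I/O set up via s2n_connection_set_fd()/set_read_fd()/set_write_fd(), and fail with S2N_ERR_INVALID_STATE when custom send/recv callbacks are installed:

#include <s2n.h>
#include <stdio.h>

/* Sketch: recover the fds that s2n manages after s2n_connection_set_fd(conn, sock). */
static int print_managed_fds(struct s2n_connection *conn)
{
    int rfd = -1, wfd = -1;
    if (s2n_connection_get_read_fd(conn, &rfd) != S2N_SUCCESS) { return -1; }
    if (s2n_connection_get_write_fd(conn, &wfd) != S2N_SUCCESS) { return -1; }
    printf("read fd: %d, write fd: %d\n", rfd, wfd);
    return 0;
}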
int s2n_connection_set_fd(struct s2n_connection *conn, int fd)
{
- GUARD(s2n_connection_set_read_fd(conn, fd));
- GUARD(s2n_connection_set_write_fd(conn, fd));
+ POSIX_GUARD(s2n_connection_set_read_fd(conn, fd));
+ POSIX_GUARD(s2n_connection_set_write_fd(conn, fd));
return 0;
}
int s2n_connection_use_corked_io(struct s2n_connection *conn)
{
- if (!conn->managed_io) {
- /* Caller shouldn't be trying to set s2n IO corked on non-s2n-managed IO */
- S2N_ERROR(S2N_ERR_CORK_SET_ON_UNMANAGED);
- }
+ POSIX_ENSURE_REF(conn);
+
+ /* Caller shouldn't be trying to set s2n IO corked on non-s2n-managed IO */
+ POSIX_ENSURE(conn->managed_send_io, S2N_ERR_CORK_SET_ON_UNMANAGED);
conn->corked_io = 1;
return 0;
@@ -974,79 +946,106 @@ uint64_t s2n_connection_get_wire_bytes_out(struct s2n_connection *conn)
const char *s2n_connection_get_cipher(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
- notnull_check_ptr(conn->secure.cipher_suite);
+ PTR_ENSURE_REF(conn);
+ PTR_ENSURE_REF(conn->secure.cipher_suite);
return conn->secure.cipher_suite->name;
}
+int s2n_connection_get_cipher_iana_value(struct s2n_connection *conn, uint8_t *first, uint8_t *second)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
+ POSIX_ENSURE_MUT(first);
+ POSIX_ENSURE_MUT(second);
+
+ /* ensure we've negotiated a cipher suite */
+ POSIX_ENSURE(
+ memcmp(
+ conn->secure.cipher_suite->iana_value,
+ s2n_null_cipher_suite.iana_value,
+ sizeof(s2n_null_cipher_suite.iana_value)
+ ) != 0,
+ S2N_ERR_INVALID_STATE
+ );
+
+ const uint8_t *iana_value = conn->secure.cipher_suite->iana_value;
+ *first = iana_value[0];
+ *second = iana_value[1];
+
+ return S2N_SUCCESS;
+}
+
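A rough caller-side sketch for the new IANA getter (assumed usage, not taken from this change). For example, TLS_AES_128_GCM_SHA256 would be reported as {0x13, 0x01}:

#include <s2n.h>
#include <stdio.h>

/* Sketch: log the negotiated cipher suite by its two IANA bytes and name. */
static int log_cipher(struct s2n_connection *conn)
{
    uint8_t first = 0, second = 0;
    if (s2n_connection_get_cipher_iana_value(conn, &first, &second) != S2N_SUCCESS) {
        return -1; /* fails before a suite has been negotiated */
    }
    printf("negotiated cipher suite: {0x%02X, 0x%02X} (%s)\n",
           first, second, s2n_connection_get_cipher(conn));
    return 0;
}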
const char *s2n_connection_get_curve(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
+ PTR_ENSURE_REF(conn);
- if (!conn->secure.server_ecc_evp_params.negotiated_curve) {
- return "NONE";
+ if (conn->kex_params.server_ecc_evp_params.negotiated_curve) {
+ /* TLS1.3 currently only uses ECC groups. */
+ if (conn->actual_protocol_version >= S2N_TLS13 || s2n_kex_includes(conn->secure.cipher_suite->key_exchange_alg, &s2n_ecdhe)) {
+ return conn->kex_params.server_ecc_evp_params.negotiated_curve->name;
+ }
}
- return conn->secure.server_ecc_evp_params.negotiated_curve->name;
+ return "NONE";
}
const char *s2n_connection_get_kem_name(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
+ PTR_ENSURE_REF(conn);
- if (!conn->secure.kem_params.kem) {
+ if (!conn->kex_params.kem_params.kem) {
return "NONE";
}
- return conn->secure.kem_params.kem->name;
+ return conn->kex_params.kem_params.kem->name;
}
const char *s2n_connection_get_kem_group_name(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
+ PTR_ENSURE_REF(conn);
- if (!conn->secure.chosen_client_kem_group_params || !conn->secure.chosen_client_kem_group_params->kem_group) {
+ if (conn->actual_protocol_version < S2N_TLS13 || !conn->kex_params.client_kem_group_params.kem_group) {
return "NONE";
}
- return conn->secure.chosen_client_kem_group_params->kem_group->name;
+ return conn->kex_params.client_kem_group_params.kem_group->name;
}
int s2n_connection_get_client_protocol_version(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
return conn->client_protocol_version;
}
int s2n_connection_get_server_protocol_version(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
return conn->server_protocol_version;
}
int s2n_connection_get_actual_protocol_version(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
return conn->actual_protocol_version;
}
int s2n_connection_get_client_hello_version(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
return conn->client_hello_version;
}
int s2n_connection_client_cert_used(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
- if ((conn->handshake.handshake_type & CLIENT_AUTH) && is_handshake_complete(conn)) {
- if (conn->handshake.handshake_type & NO_CLIENT_CERT) {
+ if (IS_CLIENT_AUTH_HANDSHAKE(conn) && is_handshake_complete(conn)) {
+ if (IS_CLIENT_AUTH_NO_CERT(conn)) {
return 0;
}
return 1;
@@ -1056,41 +1055,41 @@ int s2n_connection_client_cert_used(struct s2n_connection *conn)
int s2n_connection_get_alert(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
S2N_ERROR_IF(s2n_stuffer_data_available(&conn->alert_in) != 2, S2N_ERR_NO_ALERT);
uint8_t alert_code = 0;
- GUARD(s2n_stuffer_read_uint8(&conn->alert_in, &alert_code));
- GUARD(s2n_stuffer_read_uint8(&conn->alert_in, &alert_code));
+ POSIX_GUARD(s2n_stuffer_read_uint8(&conn->alert_in, &alert_code));
+ POSIX_GUARD(s2n_stuffer_read_uint8(&conn->alert_in, &alert_code));
return alert_code;
}
int s2n_set_server_name(struct s2n_connection *conn, const char *server_name)
{
- notnull_check(conn);
- notnull_check(server_name);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(server_name);
S2N_ERROR_IF(conn->mode != S2N_CLIENT, S2N_ERR_CLIENT_MODE);
int len = strlen(server_name);
S2N_ERROR_IF(len > S2N_MAX_SERVER_NAME, S2N_ERR_SERVER_NAME_TOO_LONG);
- memcpy_check(conn->server_name, server_name, len);
+ POSIX_CHECKED_MEMCPY(conn->server_name, server_name, len);
return 0;
}
const char *s2n_get_server_name(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
+ PTR_ENSURE_REF(conn);
if (conn->server_name[0]) {
return conn->server_name;
}
- GUARD_PTR(s2n_extension_process(&s2n_client_server_name_extension, conn, &conn->client_hello.extensions));
+ PTR_GUARD_POSIX(s2n_extension_process(&s2n_client_server_name_extension, conn, &conn->client_hello.extensions));
if (!conn->server_name[0]) {
return NULL;
@@ -1101,7 +1100,7 @@ const char *s2n_get_server_name(struct s2n_connection *conn)
const char *s2n_get_application_protocol(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
+ PTR_ENSURE_REF(conn);
if (strlen(conn->application_protocol) == 0) {
return NULL;
@@ -1112,27 +1111,31 @@ const char *s2n_get_application_protocol(struct s2n_connection *conn)
int s2n_connection_get_session_id_length(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
+ /* Stateful session resumption in TLS1.3 using session id is not yet supported. */
+ if (conn->actual_protocol_version >= S2N_TLS13) {
+ return 0;
+ }
return conn->session_id_len;
}
int s2n_connection_get_session_id(struct s2n_connection *conn, uint8_t *session_id, size_t max_length)
{
- notnull_check(conn);
- notnull_check(session_id);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(session_id);
int session_id_len = s2n_connection_get_session_id_length(conn);
S2N_ERROR_IF(session_id_len > max_length, S2N_ERR_SESSION_ID_TOO_LONG);
- memcpy_check(session_id, conn->session_id, session_id_len);
+ POSIX_CHECKED_MEMCPY(session_id, conn->session_id, session_id_len);
return session_id_len;
}
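
A hedged sketch of the intended call pattern (assumed, not part of this change): size the buffer from the length getter before copying the session id; under TLS 1.3 the length getter now reports 0, so nothing is copied:

#include <s2n.h>
#include <stdint.h>
#include <stdlib.h>

/* Sketch: copy the session id out of the connection, if one is available. */
static int copy_session_id(struct s2n_connection *conn, uint8_t **out, int *out_len)
{
    int len = s2n_connection_get_session_id_length(conn);
    if (len <= 0) { *out = NULL; *out_len = 0; return len; }
    *out = malloc(len);
    if (*out == NULL) { return -1; }
    *out_len = s2n_connection_get_session_id(conn, *out, len);
    return *out_len;
}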
int s2n_connection_set_blinding(struct s2n_connection *conn, s2n_blinding blinding)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
conn->blinding = blinding;
return 0;
@@ -1149,7 +1152,7 @@ uint64_t s2n_connection_get_delay(struct s2n_connection *conn)
uint64_t elapsed;
/* This will cast -1 to max uint64_t */
- GUARD_AS_POSIX(s2n_timer_elapsed(conn->config, &conn->write_timer, &elapsed));
+ POSIX_GUARD_RESULT(s2n_timer_elapsed(conn->config, &conn->write_timer, &elapsed));
if (elapsed > conn->delay) {
return 0;
@@ -1158,9 +1161,53 @@ uint64_t s2n_connection_get_delay(struct s2n_connection *conn)
return conn->delay - elapsed;
}
+S2N_CLEANUP_RESULT s2n_connection_apply_error_blinding(struct s2n_connection **conn)
+{
+ RESULT_ENSURE_REF(conn);
+ if (*conn == NULL) {
+ return S2N_RESULT_OK;
+ }
+
+ int error_code = s2n_errno;
+ int error_type = s2n_error_get_type(error_code);
+
+ switch(error_type) {
+ case S2N_ERR_T_OK:
+ /* Ignore no error */
+ return S2N_RESULT_OK;
+ case S2N_ERR_T_BLOCKED:
+ /* All blocking errors are retriable and should trigger no further action. */
+ return S2N_RESULT_OK;
+ default:
+ break;
+ }
+
+ switch(error_code) {
+ /* Don't invoke blinding on some of the common errors.
+ *
+ * Be careful adding new errors here. Disabling blinding for an
+ * error that can be triggered by secret / encrypted values can
+ * potentially lead to a side channel attack.
+ *
+ * We may want to someday add an explicit error type for these errors.
+ */
+ case S2N_ERR_CANCELLED:
+ case S2N_ERR_CIPHER_NOT_SUPPORTED:
+ case S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED:
+ (*conn)->closed = 1;
+ break;
+ default:
+ /* Apply blinding to all other errors */
+ RESULT_GUARD_POSIX(s2n_connection_kill(*conn));
+ break;
+ }
+
+ return S2N_RESULT_OK;
+}
+
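s2n applies this blinding internally when S2N_BUILT_IN_BLINDING is set; with S2N_SELF_SERVICE_BLINDING the caller is expected to honor the delay reported by s2n_connection_get_delay() (in nanoseconds) itself. A minimal caller-side sketch under that assumption, with error handling trimmed:

#include <s2n.h>
#include <stdint.h>
#include <unistd.h>

/* Sketch: self-service blinding around the handshake. */
static int negotiate_with_self_service_blinding(struct s2n_connection *conn)
{
    s2n_blocked_status blocked = S2N_NOT_BLOCKED;
    if (s2n_connection_set_blinding(conn, S2N_SELF_SERVICE_BLINDING) != S2N_SUCCESS) { return -1; }
    if (s2n_negotiate(conn, &blocked) == S2N_SUCCESS) { return 0; }
    if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_BLOCKED) { return 1; /* retry later */ }
    /* Fatal error: honor the blinding delay ourselves before closing the socket. */
    uint64_t delay_ns = s2n_connection_get_delay(conn);
    usleep(delay_ns / 1000);
    return -1;
}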
int s2n_connection_kill(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
conn->closed = 1;
@@ -1169,12 +1216,12 @@ int s2n_connection_kill(struct s2n_connection *conn)
/* Keep track of the delay so that it can be enforced */
uint64_t rand_delay = 0;
- GUARD_AS_POSIX(s2n_public_random(max - min, &rand_delay));
+ POSIX_GUARD_RESULT(s2n_public_random(max - min, &rand_delay));
conn->delay = min + rand_delay;
/* Restart the write timer */
- GUARD_AS_POSIX(s2n_timer_start(conn->config, &conn->write_timer));
+ POSIX_GUARD_RESULT(s2n_timer_start(conn->config, &conn->write_timer));
if (conn->blinding == S2N_BUILT_IN_BLINDING) {
struct timespec sleep_time = {.tv_sec = conn->delay / ONE_S,.tv_nsec = conn->delay % ONE_S };
@@ -1191,38 +1238,56 @@ int s2n_connection_kill(struct s2n_connection *conn)
const uint8_t *s2n_connection_get_ocsp_response(struct s2n_connection *conn, uint32_t * length)
{
- notnull_check_ptr(conn);
- notnull_check_ptr(length);
+ PTR_ENSURE_REF(conn);
+ PTR_ENSURE_REF(length);
*length = conn->status_response.size;
return conn->status_response.data;
}
-int s2n_connection_prefer_throughput(struct s2n_connection *conn)
+S2N_RESULT s2n_connection_set_max_fragment_length(struct s2n_connection *conn, uint16_t max_frag_length)
{
- notnull_check(conn);
+ RESULT_ENSURE_REF(conn);
- if (!conn->mfl_code) {
- conn->max_outgoing_fragment_length = S2N_LARGE_FRAGMENT_LENGTH;
+ if (conn->negotiated_mfl_code) {
+ /* Respect the upper limit agreed on with the peer */
+ RESULT_ENSURE_LT(conn->negotiated_mfl_code, s2n_array_len(mfl_code_to_length));
+ conn->max_outgoing_fragment_length = MIN(mfl_code_to_length[conn->negotiated_mfl_code], max_frag_length);
+ } else {
+ conn->max_outgoing_fragment_length = max_frag_length;
}
- return 0;
+ /* If no buffer has been initialized yet, no need to resize.
+ * The standard I/O logic will handle initializing the buffer.
+ */
+ if (s2n_stuffer_is_freed(&conn->out)) {
+ return S2N_RESULT_OK;
+ }
+
+ uint16_t max_wire_record_size = 0;
+ RESULT_GUARD(s2n_record_max_write_size(conn, conn->max_outgoing_fragment_length, &max_wire_record_size));
+ if ((conn->out.blob.size < max_wire_record_size)) {
+ RESULT_GUARD_POSIX(s2n_realloc(&conn->out.blob, max_wire_record_size));
+ }
+
+ return S2N_RESULT_OK;
}
-int s2n_connection_prefer_low_latency(struct s2n_connection *conn)
+int s2n_connection_prefer_throughput(struct s2n_connection *conn)
{
- notnull_check(conn);
-
- if (!conn->mfl_code) {
- conn->max_outgoing_fragment_length = S2N_SMALL_FRAGMENT_LENGTH;
- }
+ POSIX_GUARD_RESULT(s2n_connection_set_max_fragment_length(conn, S2N_LARGE_FRAGMENT_LENGTH));
+ return S2N_SUCCESS;
+}
- return 0;
+int s2n_connection_prefer_low_latency(struct s2n_connection *conn)
+{
+ POSIX_GUARD_RESULT(s2n_connection_set_max_fragment_length(conn, S2N_SMALL_FRAGMENT_LENGTH));
+ return S2N_SUCCESS;
}
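
Both preference setters now route through s2n_connection_set_max_fragment_length(), which caps the request at any maximum fragment length negotiated with the peer. A small sketch of that interaction (assuming the RFC 6066 mapping, where MFL code 4 corresponds to 4096-byte fragments):

#include <s2n.h>

/* Sketch: once an MFL extension is negotiated, prefer_throughput() cannot exceed
 * the agreed limit because s2n_connection_set_max_fragment_length() applies MIN(). */
static int bump_fragment_size(struct s2n_connection *conn)
{
    /* Requests S2N_LARGE_FRAGMENT_LENGTH, but the effective
     * max_outgoing_fragment_length stays at 4096 if the negotiated MFL code is 4. */
    return s2n_connection_prefer_throughput(conn);
}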
int s2n_connection_set_dynamic_record_threshold(struct s2n_connection *conn, uint32_t resize_threshold, uint16_t timeout_threshold)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
S2N_ERROR_IF(resize_threshold > S2N_TLS_MAX_RESIZE_THRESHOLD, S2N_ERR_INVALID_DYNAMIC_THRESHOLD);
conn->dynamic_record_resize_threshold = resize_threshold;
@@ -1231,7 +1296,7 @@ int s2n_connection_set_dynamic_record_threshold(struct s2n_connection *conn, uin
}
int s2n_connection_set_verify_host_callback(struct s2n_connection *conn, s2n_verify_host_fn verify_host_fn, void *data) {
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
conn->verify_host_fn = verify_host_fn;
conn->data_for_verify_host = data;
@@ -1242,9 +1307,9 @@ int s2n_connection_set_verify_host_callback(struct s2n_connection *conn, s2n_ver
int s2n_connection_recv_stuffer(struct s2n_stuffer *stuffer, struct s2n_connection *conn, uint32_t len)
{
- notnull_check(conn->recv);
+ POSIX_ENSURE_REF(conn->recv);
/* Make sure we have enough space to write */
- GUARD(s2n_stuffer_reserve_space(stuffer, len));
+ POSIX_GUARD(s2n_stuffer_reserve_space(stuffer, len));
int r = 0;
do {
@@ -1254,16 +1319,16 @@ int s2n_connection_recv_stuffer(struct s2n_stuffer *stuffer, struct s2n_connecti
} while (r < 0);
/* Record just how many bytes we have written */
- GUARD(s2n_stuffer_skip_write(stuffer, r));
+ POSIX_GUARD(s2n_stuffer_skip_write(stuffer, r));
return r;
}
int s2n_connection_send_stuffer(struct s2n_stuffer *stuffer, struct s2n_connection *conn, uint32_t len)
{
- notnull_check(conn);
- notnull_check(conn->send);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->send);
if (conn->write_fd_broken) {
- S2N_ERROR(S2N_ERR_SEND_STUFFER_TO_CONN);
+ POSIX_BAIL(S2N_ERR_SEND_STUFFER_TO_CONN);
}
/* Make sure we even have the data */
S2N_ERROR_IF(s2n_stuffer_data_available(stuffer) < len, S2N_ERR_STUFFER_OUT_OF_DATA);
@@ -1278,15 +1343,15 @@ int s2n_connection_send_stuffer(struct s2n_stuffer *stuffer, struct s2n_connecti
S2N_ERROR_IF(w < 0 && errno != EINTR, S2N_ERR_SEND_STUFFER_TO_CONN);
} while (w < 0);
- GUARD(s2n_stuffer_skip_read(stuffer, w));
+ POSIX_GUARD(s2n_stuffer_skip_read(stuffer, w));
return w;
}
int s2n_connection_is_managed_corked(const struct s2n_connection *s2n_connection)
{
- notnull_check(s2n_connection);
+ POSIX_ENSURE_REF(s2n_connection);
- return (s2n_connection->managed_io && s2n_connection->corked_io);
+ return (s2n_connection->managed_send_io && s2n_connection->corked_io);
}
const uint8_t *s2n_connection_get_sct_list(struct s2n_connection *conn, uint32_t *length)
@@ -1302,14 +1367,14 @@ const uint8_t *s2n_connection_get_sct_list(struct s2n_connection *conn, uint32_t
int s2n_connection_is_client_auth_enabled(struct s2n_connection *s2n_connection)
{
s2n_cert_auth_type auth_type;
- GUARD(s2n_connection_get_client_auth_type(s2n_connection, &auth_type));
+ POSIX_GUARD(s2n_connection_get_client_auth_type(s2n_connection, &auth_type));
return (auth_type != S2N_CERT_AUTH_NONE);
}
struct s2n_cert_chain_and_key *s2n_connection_get_selected_cert(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
+ PTR_ENSURE_REF(conn);
return conn->handshake_params.our_chain_and_key;
}
@@ -1329,26 +1394,168 @@ uint8_t s2n_connection_get_protocol_version(const struct s2n_connection *conn)
return conn->server_protocol_version;
}
-int s2n_connection_set_keyshare_by_name_for_testing(struct s2n_connection *conn, const char* curve_name)
+DEFINE_POINTER_CLEANUP_FUNC(struct s2n_cert_chain *, s2n_cert_chain_free);
+
+int s2n_connection_get_peer_cert_chain(const struct s2n_connection *conn, struct s2n_cert_chain_and_key *cert_chain_and_key)
{
- ENSURE_POSIX(S2N_IN_TEST, S2N_ERR_NOT_IN_TEST);
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(cert_chain_and_key);
+
+ DEFER_CLEANUP(struct s2n_cert_chain *cert_chain = cert_chain_and_key->cert_chain, s2n_cert_chain_free_pointer);
+ struct s2n_cert **insert = &cert_chain->head;
+ POSIX_ENSURE(*insert == NULL, S2N_ERR_INVALID_ARGUMENT);
+
+ const struct s2n_x509_validator *validator = &conn->x509_validator;
+ POSIX_ENSURE_REF(validator);
+ POSIX_ENSURE(s2n_x509_validator_is_cert_chain_validated(validator), S2N_ERR_CERT_NOT_VALIDATED);
+
+ /* X509_STORE_CTX_get1_chain() returns a validated cert chain if a previous call to X509_verify_cert() was successful.
+     * X509_STORE_CTX_get0_chain() is a better API because it doesn't return a copy. But it's not available for OpenSSL 1.0.2.
+ * See the comments here:
+ * https://www.openssl.org/docs/man1.0.2/man3/X509_STORE_CTX_get1_chain.html
+ */
+ DEFER_CLEANUP(STACK_OF(X509) *cert_chain_validated = X509_STORE_CTX_get1_chain(validator->store_ctx),
+ s2n_openssl_x509_stack_pop_free);
+ POSIX_ENSURE_REF(cert_chain_validated);
+
+ for (size_t cert_idx = 0; cert_idx < sk_X509_num(cert_chain_validated); cert_idx++) {
+ X509 *cert = sk_X509_value(cert_chain_validated, cert_idx);
+ POSIX_ENSURE_REF(cert);
+ DEFER_CLEANUP(uint8_t *cert_data = NULL, s2n_crypto_free);
+ int cert_size = i2d_X509(cert, &cert_data);
+ POSIX_ENSURE_GT(cert_size, 0);
- if (!strcmp(curve_name, "none")) {
- S2N_SET_KEY_SHARE_LIST_EMPTY(conn->preferred_key_shares);
- return S2N_SUCCESS;
+ struct s2n_blob mem = { 0 };
+ POSIX_GUARD(s2n_alloc(&mem, sizeof(struct s2n_cert)));
+
+ struct s2n_cert *new_node = (struct s2n_cert *)(void *)mem.data;
+ POSIX_ENSURE_REF(new_node);
+
+ new_node->next = NULL;
+ *insert = new_node;
+ insert = &new_node->next;
+
+ POSIX_GUARD(s2n_alloc(&new_node->raw, cert_size));
+ POSIX_CHECKED_MEMCPY(new_node->raw.data, cert_data, cert_size);
}
- const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ ZERO_TO_DISABLE_DEFER_CLEANUP(cert_chain);
- for (size_t i = 0; i < ecc_pref->count; i++) {
- if (!strcmp(ecc_pref->ecc_curves[i]->name, curve_name)) {
- S2N_SET_KEY_SHARE_REQUEST(conn->preferred_key_shares, i);
- return S2N_SUCCESS;
- }
+ return S2N_SUCCESS;
+}
+
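A hedged usage sketch for the new peer-chain getter (assumes the public s2n_cert_chain_and_key_new()/free() API; not part of this change): the caller supplies an empty chain object and the function fills it with DER copies of the validated peer certificates:

#include <s2n.h>

/* Sketch: copy the validated peer chain out of the connection after the handshake. */
static int inspect_peer_chain(struct s2n_connection *conn)
{
    struct s2n_cert_chain_and_key *chain = s2n_cert_chain_and_key_new();
    if (chain == NULL) { return -1; }
    if (s2n_connection_get_peer_cert_chain(conn, chain) != S2N_SUCCESS) {
        s2n_cert_chain_and_key_free(chain);
        return -1; /* fails unless the chain was validated, e.g. S2N_ERR_CERT_NOT_VALIDATED */
    }
    /* ... inspect the copied chain ... */
    s2n_cert_chain_and_key_free(chain);
    return 0;
}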
+static S2N_RESULT s2n_signature_scheme_to_tls_iana(struct s2n_signature_scheme *sig_scheme, s2n_tls_hash_algorithm *converted_scheme)
+{
+ RESULT_ENSURE_REF(sig_scheme);
+ RESULT_ENSURE_REF(converted_scheme);
+
+ switch (sig_scheme->hash_alg) {
+ case S2N_HASH_MD5:
+ *converted_scheme = S2N_TLS_HASH_MD5;
+ break;
+ case S2N_HASH_SHA1:
+ *converted_scheme = S2N_TLS_HASH_SHA1;
+ break;
+ case S2N_HASH_SHA224:
+ *converted_scheme = S2N_TLS_HASH_SHA224;
+ break;
+ case S2N_HASH_SHA256:
+ *converted_scheme = S2N_TLS_HASH_SHA256;
+ break;
+ case S2N_HASH_SHA384:
+ *converted_scheme = S2N_TLS_HASH_SHA384;
+ break;
+ case S2N_HASH_SHA512:
+ *converted_scheme = S2N_TLS_HASH_SHA512;
+ break;
+ case S2N_HASH_MD5_SHA1:
+ *converted_scheme = S2N_TLS_HASH_MD5_SHA1;
+ break;
+ default:
+ *converted_scheme = S2N_TLS_HASH_NONE;
+ break;
+ }
+
+ return S2N_RESULT_OK;
+}
+
+int s2n_connection_get_selected_digest_algorithm(struct s2n_connection *conn, s2n_tls_hash_algorithm *converted_scheme)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(converted_scheme);
+
+ POSIX_GUARD_RESULT(s2n_signature_scheme_to_tls_iana(&conn->handshake_params.conn_sig_scheme, converted_scheme));
+
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_get_selected_client_cert_digest_algorithm(struct s2n_connection *conn, s2n_tls_hash_algorithm *converted_scheme)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(converted_scheme);
+
+ POSIX_GUARD_RESULT(s2n_signature_scheme_to_tls_iana(&conn->handshake_params.client_cert_sig_scheme, converted_scheme));
+ return S2N_SUCCESS;
+}
+
+static S2N_RESULT s2n_signature_scheme_to_signature_algorithm(struct s2n_signature_scheme *sig_scheme, s2n_tls_signature_algorithm *converted_scheme)
+{
+ RESULT_ENSURE_REF(sig_scheme);
+ RESULT_ENSURE_REF(converted_scheme);
+
+ switch (sig_scheme->sig_alg) {
+ case S2N_SIGNATURE_RSA:
+ *converted_scheme = S2N_TLS_SIGNATURE_RSA;
+ break;
+ case S2N_SIGNATURE_ECDSA:
+ *converted_scheme = S2N_TLS_SIGNATURE_ECDSA;
+ break;
+ case S2N_SIGNATURE_RSA_PSS_RSAE:
+ *converted_scheme = S2N_TLS_SIGNATURE_RSA_PSS_RSAE;
+ break;
+ case S2N_SIGNATURE_RSA_PSS_PSS:
+ *converted_scheme = S2N_TLS_SIGNATURE_RSA_PSS_PSS;
+ break;
+ default:
+ *converted_scheme = S2N_TLS_SIGNATURE_ANONYMOUS;
+ break;
}
- S2N_ERROR(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ return S2N_RESULT_OK;
+}
+
+int s2n_connection_get_selected_signature_algorithm(struct s2n_connection *conn, s2n_tls_signature_algorithm *converted_scheme)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(converted_scheme);
+
+ POSIX_GUARD_RESULT(s2n_signature_scheme_to_signature_algorithm(&conn->handshake_params.conn_sig_scheme, converted_scheme));
+
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_get_selected_client_cert_signature_algorithm(struct s2n_connection *conn, s2n_tls_signature_algorithm *converted_scheme)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(converted_scheme);
+
+ POSIX_GUARD_RESULT(s2n_signature_scheme_to_signature_algorithm(&conn->handshake_params.client_cert_sig_scheme, converted_scheme));
+
+ return S2N_SUCCESS;
+}
+
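A short sketch of how a caller might query the negotiated scheme with these getters (assumed usage, not taken from this change):

#include <s2n.h>

/* Sketch: inspect which signature scheme was negotiated for the connection. */
static int check_sig_alg(struct s2n_connection *conn)
{
    s2n_tls_signature_algorithm sig = S2N_TLS_SIGNATURE_ANONYMOUS;
    s2n_tls_hash_algorithm hash = S2N_TLS_HASH_NONE;
    if (s2n_connection_get_selected_signature_algorithm(conn, &sig) != S2N_SUCCESS) { return -1; }
    if (s2n_connection_get_selected_digest_algorithm(conn, &hash) != S2N_SUCCESS) { return -1; }
    if (sig == S2N_TLS_SIGNATURE_ECDSA && hash == S2N_TLS_HASH_SHA256) {
        /* e.g. ecdsa_secp256r1_sha256 was used for the peer's CertificateVerify */
    }
    return 0;
}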
+/*
+ * Gets the config set on the connection.
+ */
+int s2n_connection_get_config(struct s2n_connection *conn, struct s2n_config **config) {
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(config);
+
+ if (s2n_fetch_default_config() == conn->config) {
+ POSIX_BAIL(S2N_ERR_NULL);
+ }
+
+ *config = conn->config;
+
+ return S2N_SUCCESS;
}
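
A minimal caller-side sketch (assumed usage): s2n_connection_get_config() bails with S2N_ERR_NULL when the connection is still on the library default, so it can be used to distinguish "config explicitly set" from "default config":

#include <s2n.h>
#include <stddef.h>

/* Sketch: return the explicitly-set config, or NULL if only the default is in use. */
static struct s2n_config *explicit_config_or_null(struct s2n_connection *conn)
{
    struct s2n_config *config = NULL;
    if (s2n_connection_get_config(conn, &config) != S2N_SUCCESS) {
        return NULL; /* no config was set with s2n_connection_set_config() */
    }
    return config;
}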
diff --git a/contrib/restricted/aws/s2n/tls/s2n_connection.h b/contrib/restricted/aws/s2n/tls/s2n_connection.h
index 9864111db1..0332241d63 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_connection.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_connection.h
@@ -16,7 +16,7 @@
#pragma once
#include <errno.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include <signal.h>
#include <stdint.h>
@@ -25,15 +25,17 @@
#include "tls/s2n_client_hello.h"
#include "tls/s2n_config.h"
#include "tls/s2n_crypto.h"
+#include "tls/s2n_early_data.h"
+#include "tls/s2n_ecc_preferences.h"
#include "tls/s2n_handshake.h"
+#include "tls/s2n_kem_preferences.h"
+#include "tls/s2n_key_update.h"
#include "tls/s2n_prf.h"
#include "tls/s2n_quic_support.h"
+#include "tls/s2n_record.h"
+#include "tls/s2n_security_policies.h"
#include "tls/s2n_tls_parameters.h"
#include "tls/s2n_x509_validator.h"
-#include "tls/s2n_key_update.h"
-#include "tls/s2n_kem_preferences.h"
-#include "tls/s2n_ecc_preferences.h"
-#include "tls/s2n_security_policies.h"
#include "crypto/s2n_hash.h"
#include "crypto/s2n_hmac.h"
@@ -43,6 +45,8 @@
#define S2N_TLS_PROTOCOL_VERSION_LEN 2
+#define S2N_PEER_MODE(our_mode) ((our_mode + 1) % 2)
+
#define is_handshake_complete(conn) (APPLICATION_DATA == s2n_conn_get_current_message_type(conn))
typedef enum {
@@ -52,39 +56,17 @@ typedef enum {
} s2n_session_ticket_status;
struct s2n_connection {
- /* The configuration (cert, key .. etc ) */
- struct s2n_config *config;
-
- /* Overrides Security Policy in config if non-null */
- const struct s2n_security_policy *security_policy_override;
-
- /* The user defined context associated with connection */
- void *context;
-
- /* The user defined secret callback and context */
- s2n_secret_cb secret_cb;
- void *secret_cb_context;
-
- /* The send and receive callbacks don't have to be the same (e.g. two pipes) */
- s2n_send_fn *send;
- s2n_recv_fn *recv;
-
- /* The context passed to the I/O callbacks */
- void *send_io_context;
- void *recv_io_context;
-
- /* Has the user set their own I/O callbacks or is this connection using the
- * default socket-based I/O set by s2n */
- uint8_t managed_io;
-
/* Is this connection using CORK/SO_RCVLOWAT optimizations? Only valid when the connection is using
- * managed_io
+ * managed_send_io
*/
unsigned corked_io:1;
/* Session resumption indicator on client side */
unsigned client_session_resumed:1;
+ /* Connection can be used by a QUIC implementation */
+ unsigned quic_enabled:1;
+
/* Determines if we're currently sending or receiving in s2n_shutdown */
unsigned close_notify_queued:1;
@@ -110,6 +92,58 @@ struct s2n_connection {
/* If write fd is broken */
unsigned write_fd_broken:1;
+ /* Has the user set their own I/O callbacks or is this connection using the
+ * default socket-based I/O set by s2n */
+ unsigned managed_send_io:1;
+ unsigned managed_recv_io:1;
+
+ /* Key update data */
+ unsigned key_update_pending:1;
+
+ /* Early data supported by caller.
+ * If a caller does not use any APIs that support early data,
+ * do not negotiate early data.
+ */
+ unsigned early_data_expected:1;
+
+ /* Connection overrides server_max_early_data_size */
+ unsigned server_max_early_data_size_overridden:1;
+
+ /* Connection overrides psk_mode.
+ * This means that the connection will keep the existing value of psk_params->type,
+ * even when setting a new config. */
+ unsigned psk_mode_overridden:1;
+
+ /* Have we received a close notify alert from the peer. */
+ unsigned close_notify_received:1;
+
+ /* Connection negotiated an EMS */
+ unsigned ems_negotiated:1;
+
+ /* Connection successfully set a ticket on the connection */
+ unsigned set_session:1;
+
+ /* The configuration (cert, key .. etc ) */
+ struct s2n_config *config;
+
+ /* Overrides Security Policy in config if non-null */
+ const struct s2n_security_policy *security_policy_override;
+
+ /* The user defined context associated with connection */
+ void *context;
+
+ /* The user defined secret callback and context */
+ s2n_secret_cb secret_cb;
+ void *secret_cb_context;
+
+ /* The send and receive callbacks don't have to be the same (e.g. two pipes) */
+ s2n_send_fn *send;
+ s2n_recv_fn *recv;
+
+ /* The context passed to the I/O callbacks */
+ void *send_io_context;
+ void *recv_io_context;
+
/* Track request extensions to ensure correct response extension behavior.
*
* We need to track client and server extensions separately because some
@@ -156,11 +190,15 @@ struct s2n_connection {
/* Our crypto parameters */
struct s2n_crypto_parameters initial;
struct s2n_crypto_parameters secure;
+ union s2n_secrets secrets;
/* Which set is the client/server actually using? */
struct s2n_crypto_parameters *client;
struct s2n_crypto_parameters *server;
+ /* Contains parameters needed to negotiate a shared secret */
+ struct s2n_kex_parameters kex_params;
+
/* Contains parameters needed during the handshake phase */
struct s2n_handshake_parameters handshake_params;
@@ -168,7 +206,7 @@ struct s2n_connection {
struct s2n_psk_parameters psk_params;
/* The PRF needs some storage elements to work with */
- struct s2n_prf_working_space prf_space;
+ struct s2n_prf_working_space *prf_space;
/* Whether to use client_cert_auth_type stored in s2n_config or in this s2n_connection.
*
@@ -241,12 +279,14 @@ struct s2n_connection {
/* number of bytes consumed during application activity */
uint64_t active_application_bytes_consumed;
- /* Negotiated TLS extension Maximum Fragment Length code */
- uint8_t mfl_code;
+ /* Negotiated TLS extension Maximum Fragment Length code.
+ * If set, the client and server have both agreed to fragment their records to the given length. */
+ uint8_t negotiated_mfl_code;
/* Keep some accounting on each connection */
uint64_t wire_bytes_in;
uint64_t wire_bytes_out;
+ uint64_t early_data_bytes;
/* Is the connection open or closed ? We use C's only
* atomic type as both the reader and the writer threads
@@ -302,37 +342,36 @@ struct s2n_connection {
s2n_session_ticket_status session_ticket_status;
struct s2n_blob client_ticket;
uint32_t ticket_lifetime_hint;
+ struct s2n_ticket_fields tls13_ticket_fields;
/* Session ticket extension from client to attempt to decrypt as the server. */
- uint8_t ticket_ext_data[S2N_TICKET_SIZE_IN_BYTES];
+ uint8_t ticket_ext_data[S2N_TLS12_TICKET_SIZE_IN_BYTES];
struct s2n_stuffer client_ticket_to_decrypt;
- uint8_t resumption_master_secret[S2N_TLS13_SECRET_MAX_LEN];
-
/* application protocols overridden */
struct s2n_blob application_protocols_overridden;
/* Cookie extension data */
struct s2n_stuffer cookie_stuffer;
- /* Key update data */
- unsigned key_update_pending:1;
-
- /* Bitmap to represent preferred list of keyshare for client to generate and send keyshares in the ClientHello message.
- * The least significant bit (lsb), if set, indicates that the client must send an empty keyshare list.
- * Each bit value in the bitmap indiciates the corresponding curve in the ecc_preferences list for which a key share needs to be generated.
- * The order of the curves represented in the bitmap is obtained from the security_policy->ecc_preferences.
- * Setting and manipulating this value requires security_policy to be configured prior.
- * */
- uint8_t preferred_key_shares;
-
/* Flags to prevent users from calling methods recursively.
- * This can be an easy mistake to make when implementing send/receive callbacks.
+ * This can be an easy mistake to make when implementing callbacks.
*/
bool send_in_use;
bool recv_in_use;
+ bool negotiate_in_use;
+
+ uint16_t tickets_to_send;
+ uint16_t tickets_sent;
+
+ s2n_early_data_state early_data_state;
+ uint32_t server_max_early_data_size;
+ struct s2n_blob server_early_data_context;
+ uint32_t server_keying_material_lifetime;
};
+S2N_CLEANUP_RESULT s2n_connection_ptr_free(struct s2n_connection **s2n_connection);
+
int s2n_connection_is_managed_corked(const struct s2n_connection *s2n_connection);
int s2n_connection_is_client_auth_enabled(struct s2n_connection *s2n_connection);
@@ -343,6 +382,8 @@ int s2n_connection_kill(struct s2n_connection *conn);
int s2n_connection_send_stuffer(struct s2n_stuffer *stuffer, struct s2n_connection *conn, uint32_t len);
int s2n_connection_recv_stuffer(struct s2n_stuffer *stuffer, struct s2n_connection *conn, uint32_t len);
+S2N_RESULT s2n_connection_wipe_all_keyshares(struct s2n_connection *conn);
+
int s2n_connection_get_cipher_preferences(struct s2n_connection *conn, const struct s2n_cipher_preferences **cipher_preferences);
int s2n_connection_get_security_policy(struct s2n_connection *conn, const struct s2n_security_policy **security_policy);
int s2n_connection_get_kem_preferences(struct s2n_connection *conn, const struct s2n_kem_preferences **kem_preferences);
@@ -352,6 +393,6 @@ int s2n_connection_get_protocol_preferences(struct s2n_connection *conn, struct
int s2n_connection_set_client_auth_type(struct s2n_connection *conn, s2n_cert_auth_type cert_auth_type);
int s2n_connection_get_client_auth_type(struct s2n_connection *conn, s2n_cert_auth_type *client_cert_auth_type);
int s2n_connection_get_client_cert_chain(struct s2n_connection *conn, uint8_t **der_cert_chain_out, uint32_t *cert_chain_len);
+int s2n_connection_get_peer_cert_chain(const struct s2n_connection *conn, struct s2n_cert_chain_and_key *cert_chain_and_key);
uint8_t s2n_connection_get_protocol_version(const struct s2n_connection *conn);
-/* `none` keyword represents a list of empty keyshares */
-int s2n_connection_set_keyshare_by_name_for_testing(struct s2n_connection *conn, const char* curve_name);
+S2N_RESULT s2n_connection_set_max_fragment_length(struct s2n_connection *conn, uint16_t length);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.c b/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.c
index f7c08dc594..856ca79f89 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.c
@@ -18,109 +18,15 @@
#include "utils/s2n_safety.h"
/* On s2n_connection_wipe, save all pointers to OpenSSL EVP digest structs in a temporary
- * s2n_connection_prf_handles struct to avoid re-allocation after zeroing the connection struct.
- * Do not store any additional hash/HMAC state as it is unnecessary and excessive copying would impact performance.
- */
-int s2n_connection_save_prf_state(struct s2n_connection_prf_handles *prf_handles, struct s2n_connection *conn)
-{
- /* Preserve only the handlers for TLS PRF p_hash pointers to avoid re-allocation */
- GUARD(s2n_hmac_save_evp_hash_state(&prf_handles->p_hash_s2n_hmac, &conn->prf_space.tls.p_hash.s2n_hmac));
- prf_handles->p_hash_evp_hmac = conn->prf_space.tls.p_hash.evp_hmac;
-
- return 0;
-}
-
-/* On s2n_connection_wipe, save all pointers to OpenSSL EVP digest structs in a temporary
- * s2n_connection_hash_handles struct to avoid re-allocation after zeroing the connection struct.
- * Do not store any additional hash state as it is unnecessary and excessive copying would impact performance.
- */
-int s2n_connection_save_hash_state(struct s2n_connection_hash_handles *hash_handles, struct s2n_connection *conn)
-{
- /* Preserve only the handlers for handshake hash state pointers to avoid re-allocation */
- hash_handles->md5 = conn->handshake.md5.digest.high_level;
- hash_handles->sha1 = conn->handshake.sha1.digest.high_level;
- hash_handles->sha224 = conn->handshake.sha224.digest.high_level;
- hash_handles->sha256 = conn->handshake.sha256.digest.high_level;
- hash_handles->sha384 = conn->handshake.sha384.digest.high_level;
- hash_handles->sha512 = conn->handshake.sha512.digest.high_level;
- hash_handles->md5_sha1 = conn->handshake.md5_sha1.digest.high_level;
- hash_handles->ccv_hash_copy = conn->handshake.ccv_hash_copy.digest.high_level;
- hash_handles->prf_md5_hash_copy = conn->handshake.prf_md5_hash_copy.digest.high_level;
- hash_handles->prf_sha1_hash_copy = conn->handshake.prf_sha1_hash_copy.digest.high_level;
- hash_handles->prf_tls12_hash_copy = conn->handshake.prf_tls12_hash_copy.digest.high_level;
- hash_handles->server_finished_copy = conn->handshake.server_finished_copy.digest.high_level;
-
- /* Preserve only the handlers for SSLv3 PRF hash state pointers to avoid re-allocation */
- hash_handles->prf_md5 = conn->prf_space.ssl3.md5.digest.high_level;
- hash_handles->prf_sha1 = conn->prf_space.ssl3.sha1.digest.high_level;
-
- /* Preserve only the handlers for initial signature hash state pointers to avoid re-allocation */
- hash_handles->initial_signature_hash = conn->initial.signature_hash.digest.high_level;
-
- /* Preserve only the handlers for secure signature hash state pointers to avoid re-allocation */
- hash_handles->secure_signature_hash = conn->secure.signature_hash.digest.high_level;
-
- return 0;
-}
-
-/* On s2n_connection_wipe, save all pointers to OpenSSL EVP digest structs in a temporary
* s2n_connection_hmac_handles struct to avoid re-allocation after zeroing the connection struct.
* Do not store any additional HMAC state as it is unnecessary and excessive copying would impact performance.
*/
int s2n_connection_save_hmac_state(struct s2n_connection_hmac_handles *hmac_handles, struct s2n_connection *conn)
{
- GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->initial_client, &conn->initial.client_record_mac));
- GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->initial_server, &conn->initial.server_record_mac));
- GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->initial_client_copy, &conn->initial.record_mac_copy_workspace));
- GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->secure_client, &conn->secure.client_record_mac));
- GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->secure_server, &conn->secure.server_record_mac));
- GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->secure_client_copy, &conn->secure.record_mac_copy_workspace));
- return 0;
-}
-
-/* On s2n_connection_wipe, restore all pointers to OpenSSL EVP digest structs after zeroing the connection struct
- * to avoid re-allocation. Do not store any additional hash/HMAC state as it is unnecessary and excessive copying
- * would impact performance.
- */
-int s2n_connection_restore_prf_state(struct s2n_connection *conn, struct s2n_connection_prf_handles *prf_handles)
-{
- /* Restore s2n_connection handlers for TLS PRF p_hash */
- GUARD(s2n_hmac_restore_evp_hash_state(&prf_handles->p_hash_s2n_hmac, &conn->prf_space.tls.p_hash.s2n_hmac));
- conn->prf_space.tls.p_hash.evp_hmac = prf_handles->p_hash_evp_hmac;
-
- return 0;
-}
-
-/* On s2n_connection_wipe, restore all pointers to OpenSSL EVP digest structs after zeroing the connection struct
- * to avoid re-allocation. Do not store any additional hash state as it is unnecessary and excessive copying
- * would impact performance.
- */
-int s2n_connection_restore_hash_state(struct s2n_connection *conn, struct s2n_connection_hash_handles *hash_handles)
-{
- /* Restore s2n_connection handlers for handshake hash states */
- conn->handshake.md5.digest.high_level = hash_handles->md5;
- conn->handshake.sha1.digest.high_level = hash_handles->sha1;
- conn->handshake.sha224.digest.high_level = hash_handles->sha224;
- conn->handshake.sha256.digest.high_level = hash_handles->sha256;
- conn->handshake.sha384.digest.high_level = hash_handles->sha384;
- conn->handshake.sha512.digest.high_level = hash_handles->sha512;
- conn->handshake.md5_sha1.digest.high_level = hash_handles->md5_sha1;
- conn->handshake.ccv_hash_copy.digest.high_level = hash_handles->ccv_hash_copy;
- conn->handshake.prf_md5_hash_copy.digest.high_level = hash_handles->prf_md5_hash_copy;
- conn->handshake.prf_sha1_hash_copy.digest.high_level = hash_handles->prf_sha1_hash_copy;
- conn->handshake.prf_tls12_hash_copy.digest.high_level = hash_handles->prf_tls12_hash_copy;
- conn->handshake.server_finished_copy.digest.high_level = hash_handles->server_finished_copy;
-
- /* Restore s2n_connection handlers for SSLv3 PRF hash states */
- conn->prf_space.ssl3.md5.digest.high_level = hash_handles->prf_md5;
- conn->prf_space.ssl3.sha1.digest.high_level = hash_handles->prf_sha1;
-
- /* Restore s2n_connection handlers for initial signature hash states */
- conn->initial.signature_hash.digest.high_level = hash_handles->initial_signature_hash;
-
- /* Restore s2n_connection handlers for secure signature hash states */
- conn->secure.signature_hash.digest.high_level = hash_handles->secure_signature_hash;
-
+ POSIX_GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->initial_client, &conn->initial.client_record_mac));
+ POSIX_GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->initial_server, &conn->initial.server_record_mac));
+ POSIX_GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->secure_client, &conn->secure.client_record_mac));
+ POSIX_GUARD(s2n_hmac_save_evp_hash_state(&hmac_handles->secure_server, &conn->secure.server_record_mac));
return 0;
}
@@ -130,11 +36,9 @@ int s2n_connection_restore_hash_state(struct s2n_connection *conn, struct s2n_co
*/
int s2n_connection_restore_hmac_state(struct s2n_connection *conn, struct s2n_connection_hmac_handles *hmac_handles)
{
- GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->initial_client, &conn->initial.client_record_mac));
- GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->initial_server, &conn->initial.server_record_mac));
- GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->initial_client_copy, &conn->initial.record_mac_copy_workspace));
- GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->secure_client, &conn->secure.client_record_mac));
- GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->secure_server, &conn->secure.server_record_mac));
- GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->secure_client_copy, &conn->secure.record_mac_copy_workspace));
+ POSIX_GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->initial_client, &conn->initial.client_record_mac));
+ POSIX_GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->initial_server, &conn->initial.server_record_mac));
+ POSIX_GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->secure_client, &conn->secure.client_record_mac));
+ POSIX_GUARD(s2n_hmac_restore_evp_hash_state(&hmac_handles->secure_server, &conn->secure.server_record_mac));
return 0;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.h b/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.h
index 80e16823ba..e94e19d010 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_connection_evp_digests.h
@@ -20,38 +20,6 @@
#include "crypto/s2n_hash.h"
-struct s2n_connection_prf_handles {
- /* TLS PRF HMAC p_hash */
- struct s2n_hmac_evp_backup p_hash_s2n_hmac;
-
- /* TLS PRF EVP p_hash */
- struct s2n_evp_hmac_state p_hash_evp_hmac;
-};
-
-struct s2n_connection_hash_handles {
- /* Handshake hash states */
- struct s2n_hash_evp_digest md5;
- struct s2n_hash_evp_digest sha1;
- struct s2n_hash_evp_digest sha224;
- struct s2n_hash_evp_digest sha256;
- struct s2n_hash_evp_digest sha384;
- struct s2n_hash_evp_digest sha512;
- struct s2n_hash_evp_digest md5_sha1;
- struct s2n_hash_evp_digest ccv_hash_copy;
- struct s2n_hash_evp_digest prf_md5_hash_copy;
- struct s2n_hash_evp_digest prf_sha1_hash_copy;
- struct s2n_hash_evp_digest prf_tls12_hash_copy;
- struct s2n_hash_evp_digest server_finished_copy;
- struct s2n_hash_evp_digest prf_md5;
-
- /* SSLv3 PRF hash states */
- struct s2n_hash_evp_digest prf_sha1;
-
- /* Initial signature hash states */
- struct s2n_hash_evp_digest initial_signature_hash;
- struct s2n_hash_evp_digest secure_signature_hash;
-};
-
/* Allocating new EVP structs is expensive, so we back them up here and reuse them */
struct s2n_connection_hmac_handles {
struct s2n_hmac_evp_backup initial_client;
@@ -62,9 +30,5 @@ struct s2n_connection_hmac_handles {
struct s2n_hmac_evp_backup secure_server;
};
-extern int s2n_connection_save_prf_state(struct s2n_connection_prf_handles *prf_handles, struct s2n_connection *conn);
-extern int s2n_connection_save_hash_state(struct s2n_connection_hash_handles *hash_handles, struct s2n_connection *conn);
extern int s2n_connection_save_hmac_state(struct s2n_connection_hmac_handles *hmac_handles, struct s2n_connection *conn);
-extern int s2n_connection_restore_prf_state(struct s2n_connection *conn, struct s2n_connection_prf_handles *prf_handles);
-extern int s2n_connection_restore_hash_state(struct s2n_connection *conn, struct s2n_connection_hash_handles *hash_handles);
extern int s2n_connection_restore_hmac_state(struct s2n_connection *conn, struct s2n_connection_hmac_handles *hmac_handles);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_crypto.h b/contrib/restricted/aws/s2n/tls/s2n_crypto.h
index 515165c09d..de6c18b332 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_crypto.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_crypto.h
@@ -19,6 +19,7 @@
#include "tls/s2n_signature_scheme.h"
#include "tls/s2n_crypto_constants.h"
#include "tls/s2n_kem.h"
+#include "tls/s2n_tls13_secrets.h"
#include "crypto/s2n_certificate.h"
#include "crypto/s2n_cipher.h"
@@ -30,44 +31,37 @@
#include "crypto/s2n_dhe.h"
#include "crypto/s2n_ecc_evp.h"
-struct s2n_crypto_parameters {
- struct s2n_pkey server_public_key;
- struct s2n_pkey client_public_key;
+struct s2n_kex_parameters {
struct s2n_dh_params server_dh_params;
struct s2n_ecc_evp_params server_ecc_evp_params;
const struct s2n_ecc_named_curve *mutually_supported_curves[S2N_ECC_EVP_SUPPORTED_CURVES_COUNT];
- struct s2n_ecc_evp_params client_ecc_evp_params[S2N_ECC_EVP_SUPPORTED_CURVES_COUNT];
+ struct s2n_ecc_evp_params client_ecc_evp_params;
struct s2n_kem_group_params server_kem_group_params;
- struct s2n_kem_group_params *chosen_client_kem_group_params;
- struct s2n_kem_group_params client_kem_group_params[S2N_SUPPORTED_KEM_GROUPS_COUNT];
+ struct s2n_kem_group_params client_kem_group_params;
const struct s2n_kem_group *mutually_supported_kem_groups[S2N_SUPPORTED_KEM_GROUPS_COUNT];
struct s2n_kem_params kem_params;
struct s2n_blob client_key_exchange_message;
struct s2n_blob client_pq_kem_extension;
+};
- struct s2n_signature_scheme conn_sig_scheme;
-
- struct s2n_blob client_cert_chain;
- s2n_pkey_type client_cert_pkey_type;
+struct s2n_tls12_secrets {
+ uint8_t rsa_premaster_secret[S2N_TLS_SECRET_LEN];
+ uint8_t master_secret[S2N_TLS_SECRET_LEN];
+};
- struct s2n_signature_scheme client_cert_sig_scheme;
+union s2n_secrets {
+ struct s2n_tls12_secrets tls12;
+ struct s2n_tls13_secrets tls13;
+};
+struct s2n_crypto_parameters {
struct s2n_cipher_suite *cipher_suite;
struct s2n_session_key client_key;
struct s2n_session_key server_key;
-
- uint8_t rsa_premaster_secret[S2N_TLS_SECRET_LEN];
- uint8_t master_secret[S2N_TLS_SECRET_LEN];
- uint8_t client_random[S2N_TLS_RANDOM_DATA_LEN];
- uint8_t server_random[S2N_TLS_RANDOM_DATA_LEN];
- uint8_t client_implicit_iv[S2N_TLS_MAX_IV_LEN];
- uint8_t server_implicit_iv[S2N_TLS_MAX_IV_LEN];
- uint8_t client_app_secret[S2N_TLS13_SECRET_MAX_LEN];
- uint8_t server_app_secret[S2N_TLS13_SECRET_MAX_LEN];
- struct s2n_hash_state signature_hash;
struct s2n_hmac_state client_record_mac;
struct s2n_hmac_state server_record_mac;
- struct s2n_hmac_state record_mac_copy_workspace;
+ uint8_t client_implicit_iv[S2N_TLS_MAX_IV_LEN];
+ uint8_t server_implicit_iv[S2N_TLS_MAX_IV_LEN];
uint8_t client_sequence_number[S2N_TLS_SEQUENCE_NUM_LEN];
uint8_t server_sequence_number[S2N_TLS_SEQUENCE_NUM_LEN];
};
diff --git a/contrib/restricted/aws/s2n/tls/s2n_early_data.c b/contrib/restricted/aws/s2n/tls/s2n_early_data.c
new file mode 100644
index 0000000000..102ceed334
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_early_data.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include <sys/param.h>
+
+#include "tls/s2n_early_data.h"
+
+#include "tls/s2n_connection.h"
+#include "tls/s2n_cipher_suites.h"
+#include "tls/s2n_psk.h"
+#include "utils/s2n_safety.h"
+#include "utils/s2n_mem.h"
+
+const s2n_early_data_state valid_previous_states[] = {
+ [S2N_EARLY_DATA_REQUESTED] = S2N_UNKNOWN_EARLY_DATA_STATE,
+ [S2N_EARLY_DATA_NOT_REQUESTED] = S2N_UNKNOWN_EARLY_DATA_STATE,
+ [S2N_EARLY_DATA_REJECTED] = S2N_EARLY_DATA_REQUESTED,
+ [S2N_EARLY_DATA_ACCEPTED] = S2N_EARLY_DATA_REQUESTED,
+ [S2N_END_OF_EARLY_DATA] = S2N_EARLY_DATA_ACCEPTED,
+};
+
+S2N_RESULT s2n_connection_set_early_data_state(struct s2n_connection *conn, s2n_early_data_state next_state)
+{
+ RESULT_ENSURE_REF(conn);
+ if (conn->early_data_state == next_state) {
+ return S2N_RESULT_OK;
+ }
+ RESULT_ENSURE(next_state < S2N_EARLY_DATA_STATES_COUNT, S2N_ERR_INVALID_EARLY_DATA_STATE);
+ RESULT_ENSURE(next_state != S2N_UNKNOWN_EARLY_DATA_STATE, S2N_ERR_INVALID_EARLY_DATA_STATE);
+ RESULT_ENSURE(conn->early_data_state == valid_previous_states[next_state], S2N_ERR_INVALID_EARLY_DATA_STATE);
+ conn->early_data_state = next_state;
+ return S2N_RESULT_OK;
+}
+
+int s2n_connection_set_early_data_expected(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+ conn->early_data_expected = true;
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_set_end_of_early_data(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+ conn->early_data_expected = false;
+ return S2N_SUCCESS;
+}
+
+static S2N_RESULT s2n_early_data_validate(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# In order to accept early data, the server MUST have accepted a PSK
+ *# cipher suite and selected the first key offered in the client's
+ *# "pre_shared_key" extension.
+ **/
+ RESULT_ENSURE_REF(conn->psk_params.chosen_psk);
+ RESULT_ENSURE_EQ(conn->psk_params.chosen_psk_wire_index, 0);
+
+ struct s2n_early_data_config *config = &conn->psk_params.chosen_psk->early_data_config;
+ RESULT_ENSURE_GT(config->max_early_data_size, 0);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# In addition, it MUST verify that the
+ *# following values are the same as those associated with the
+ *# selected PSK:
+ *#
+ *# - The TLS version number
+ **/
+ RESULT_ENSURE_EQ(config->protocol_version, s2n_connection_get_protocol_version(conn));
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# - The selected cipher suite
+ **/
+ RESULT_ENSURE_EQ(config->cipher_suite, conn->secure.cipher_suite);
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# - The selected ALPN [RFC7301] protocol, if any
+ **/
+ const size_t app_protocol_size = strlen(conn->application_protocol);
+ if (app_protocol_size > 0 || config->application_protocol.size > 0) {
+ RESULT_ENSURE_EQ(config->application_protocol.size, app_protocol_size + 1 /* null-terminating char */);
+ RESULT_ENSURE_EQ(memcmp(config->application_protocol.data, conn->application_protocol, app_protocol_size), 0);
+ }
+
+ return S2N_RESULT_OK;
+}
+
+bool s2n_early_data_is_valid_for_connection(struct s2n_connection *conn)
+{
+ return s2n_result_is_ok(s2n_early_data_validate(conn));
+}
+
+S2N_RESULT s2n_early_data_accept_or_reject(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ if (conn->early_data_state != S2N_EARLY_DATA_REQUESTED) {
+ return S2N_RESULT_OK;
+ }
+
+ if (conn->handshake.early_data_async_state.conn) {
+ RESULT_BAIL(S2N_ERR_ASYNC_BLOCKED);
+ }
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# If any of these checks fail, the server MUST NOT respond with the
+ *# extension
+ **/
+ if (!s2n_early_data_is_valid_for_connection(conn)) {
+ RESULT_GUARD(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_REJECTED));
+ return S2N_RESULT_OK;
+ }
+
+ /* Even if the connection is valid for early data, the client can't consider
+ * early data accepted until the server sends the early data indication. */
+ if (conn->mode == S2N_CLIENT) {
+ return S2N_RESULT_OK;
+ }
+
+ /* The server should reject early data if the application is not prepared to handle it. */
+ if (!conn->early_data_expected) {
+ RESULT_GUARD(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_REJECTED));
+ return S2N_RESULT_OK;
+ }
+
+ /* If early data would otherwise be accepted, let the application apply any additional restrictions.
+ * For example, an application could use this callback to implement anti-replay protections.
+ *
+ * This callback can be either synchronous or asynchronous. The handshake will not proceed until
+ * the application either accepts or rejects early data.
+ */
+ RESULT_ENSURE_REF(conn->config);
+ if (conn->config->early_data_cb) {
+ conn->handshake.early_data_async_state.conn = conn;
+ RESULT_GUARD_POSIX(conn->config->early_data_cb(conn, &conn->handshake.early_data_async_state));
+ if (conn->early_data_state == S2N_EARLY_DATA_REQUESTED) {
+ RESULT_BAIL(S2N_ERR_ASYNC_BLOCKED);
+ }
+ } else {
+ RESULT_GUARD(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_ACCEPTED));
+ }
+ return S2N_RESULT_OK;
+}
+
+int s2n_config_set_server_max_early_data_size(struct s2n_config *config, uint32_t max_early_data_size)
+{
+ POSIX_ENSURE_REF(config);
+ config->server_max_early_data_size = max_early_data_size;
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_set_server_max_early_data_size(struct s2n_connection *conn, uint32_t max_early_data_size)
+{
+ POSIX_ENSURE_REF(conn);
+ conn->server_max_early_data_size = max_early_data_size;
+ conn->server_max_early_data_size_overridden = true;
+ return S2N_SUCCESS;
+}
+
+S2N_RESULT s2n_early_data_get_server_max_size(struct s2n_connection *conn, uint32_t *max_early_data_size)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(max_early_data_size);
+ if (conn->server_max_early_data_size_overridden) {
+ *max_early_data_size = conn->server_max_early_data_size;
+ } else {
+ RESULT_ENSURE_REF(conn->config);
+ *max_early_data_size = conn->config->server_max_early_data_size;
+ }
+ return S2N_RESULT_OK;
+}
+
+int s2n_connection_set_server_early_data_context(struct s2n_connection *conn, const uint8_t *context, uint16_t context_size)
+{
+ POSIX_ENSURE_REF(conn);
+ if (context_size > 0) {
+ POSIX_ENSURE_REF(context);
+ }
+
+ POSIX_GUARD(s2n_realloc(&conn->server_early_data_context, context_size));
+ POSIX_CHECKED_MEMCPY(conn->server_early_data_context.data, context, context_size);
+ return S2N_SUCCESS;
+}
+
+S2N_CLEANUP_RESULT s2n_early_data_config_free(struct s2n_early_data_config *config)
+{
+ if (config == NULL) {
+ return S2N_RESULT_OK;
+ }
+ RESULT_GUARD_POSIX(s2n_free(&config->application_protocol));
+ RESULT_GUARD_POSIX(s2n_free(&config->context));
+ return S2N_RESULT_OK;
+}
+
+int s2n_psk_configure_early_data(struct s2n_psk *psk, uint32_t max_early_data_size,
+ uint8_t cipher_suite_first_byte, uint8_t cipher_suite_second_byte)
+{
+ POSIX_ENSURE_REF(psk);
+
+ const uint8_t cipher_suite_iana[] = { cipher_suite_first_byte, cipher_suite_second_byte };
+ struct s2n_cipher_suite *cipher_suite = NULL;
+ POSIX_GUARD_RESULT(s2n_cipher_suite_from_iana(cipher_suite_iana, &cipher_suite));
+ POSIX_ENSURE_REF(cipher_suite);
+ POSIX_ENSURE(cipher_suite->prf_alg == psk->hmac_alg, S2N_ERR_INVALID_ARGUMENT);
+
+ psk->early_data_config.max_early_data_size = max_early_data_size;
+ psk->early_data_config.protocol_version = S2N_TLS13;
+ psk->early_data_config.cipher_suite = cipher_suite;
+ return S2N_SUCCESS;
+}
+
+int s2n_psk_set_application_protocol(struct s2n_psk *psk, const uint8_t *application_protocol, uint8_t size)
+{
+ POSIX_ENSURE_REF(psk);
+ if (size > 0) {
+ POSIX_ENSURE_REF(application_protocol);
+ }
+ struct s2n_blob *protocol_blob = &psk->early_data_config.application_protocol;
+ POSIX_GUARD(s2n_realloc(protocol_blob, size));
+ POSIX_CHECKED_MEMCPY(protocol_blob->data, application_protocol, size);
+ return S2N_SUCCESS;
+}
+
+int s2n_psk_set_early_data_context(struct s2n_psk *psk, const uint8_t *context, uint16_t size)
+{
+ POSIX_ENSURE_REF(psk);
+ if (size > 0) {
+ POSIX_ENSURE_REF(context);
+ }
+ struct s2n_blob *context_blob = &psk->early_data_config.context;
+ POSIX_GUARD(s2n_realloc(context_blob, size));
+ POSIX_CHECKED_MEMCPY(context_blob->data, context, size);
+ return S2N_SUCCESS;
+}
+
+S2N_RESULT s2n_early_data_config_clone(struct s2n_psk *new_psk, struct s2n_early_data_config *old_config)
+{
+ RESULT_ENSURE_REF(old_config);
+ RESULT_ENSURE_REF(new_psk);
+
+ struct s2n_early_data_config config_copy = new_psk->early_data_config;
+
+ /* Copy all fields from the old_config EXCEPT the blobs, which we need to reallocate. */
+ new_psk->early_data_config = *old_config;
+ new_psk->early_data_config.application_protocol = config_copy.application_protocol;
+ new_psk->early_data_config.context = config_copy.context;
+
+ /* Clone / realloc blobs */
+ RESULT_GUARD_POSIX(s2n_psk_set_application_protocol(new_psk, old_config->application_protocol.data,
+ old_config->application_protocol.size));
+ RESULT_GUARD_POSIX(s2n_psk_set_early_data_context(new_psk, old_config->context.data,
+ old_config->context.size));
+
+ return S2N_RESULT_OK;
+}
+
+int s2n_connection_get_early_data_status(struct s2n_connection *conn, s2n_early_data_status_t *status)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(status);
+
+ switch(conn->early_data_state) {
+ case S2N_EARLY_DATA_STATES_COUNT:
+ break;
+ case S2N_EARLY_DATA_NOT_REQUESTED:
+ *status = S2N_EARLY_DATA_STATUS_NOT_REQUESTED;
+ return S2N_SUCCESS;
+ case S2N_EARLY_DATA_REJECTED:
+ *status = S2N_EARLY_DATA_STATUS_REJECTED;
+ return S2N_SUCCESS;
+ case S2N_END_OF_EARLY_DATA:
+ *status = S2N_EARLY_DATA_STATUS_END;
+ return S2N_SUCCESS;
+ case S2N_UNKNOWN_EARLY_DATA_STATE:
+ case S2N_EARLY_DATA_REQUESTED:
+ case S2N_EARLY_DATA_ACCEPTED:
+ *status = S2N_EARLY_DATA_STATUS_OK;
+ return S2N_SUCCESS;
+ }
+ POSIX_BAIL(S2N_ERR_INVALID_EARLY_DATA_STATE);
+}
+
+static S2N_RESULT s2n_get_remaining_early_data_bytes(struct s2n_connection *conn, uint32_t *early_data_allowed)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(early_data_allowed);
+ *early_data_allowed = 0;
+
+ uint32_t max_early_data_size = 0;
+ RESULT_GUARD_POSIX(s2n_connection_get_max_early_data_size(conn, &max_early_data_size));
+
+ RESULT_ENSURE(max_early_data_size >= conn->early_data_bytes, S2N_ERR_MAX_EARLY_DATA_SIZE);
+ *early_data_allowed = (max_early_data_size - conn->early_data_bytes);
+
+ return S2N_RESULT_OK;
+}
+
+int s2n_connection_get_remaining_early_data_size(struct s2n_connection *conn, uint32_t *allowed_early_data_size)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(allowed_early_data_size);
+ *allowed_early_data_size = 0;
+
+ switch(conn->early_data_state) {
+ case S2N_EARLY_DATA_STATES_COUNT:
+ case S2N_EARLY_DATA_NOT_REQUESTED:
+ case S2N_EARLY_DATA_REJECTED:
+ case S2N_END_OF_EARLY_DATA:
+ *allowed_early_data_size = 0;
+ break;
+ case S2N_UNKNOWN_EARLY_DATA_STATE:
+ case S2N_EARLY_DATA_REQUESTED:
+ case S2N_EARLY_DATA_ACCEPTED:
+ POSIX_GUARD_RESULT(s2n_get_remaining_early_data_bytes(conn, allowed_early_data_size));
+ break;
+ }
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_get_max_early_data_size(struct s2n_connection *conn, uint32_t *max_early_data_size)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(max_early_data_size);
+ *max_early_data_size = 0;
+
+ uint32_t server_max_early_data_size = 0;
+ POSIX_GUARD_RESULT(s2n_early_data_get_server_max_size(conn, &server_max_early_data_size));
+
+ if (conn->psk_params.psk_list.len == 0) {
+ /* This method may be called by the server before loading its PSKs.
+ * The server can load its PSKs during the handshake, either via the PSK selection callback
+ * or by receiving a stateless session ticket.
+ *
+ * Before that happens, we should make an optimistic assumption of the early data size.
+ * That way, the max early data size always decreases (for example, it won't go from 0 -> UINT32_MAX
+ * after receiving a PSK in the ClientHello).
+ */
+ if (conn->mode == S2N_SERVER && !IS_NEGOTIATED(conn)) {
+ *max_early_data_size = server_max_early_data_size;
+ }
+ return S2N_SUCCESS;
+ }
+
+ struct s2n_psk *first_psk = NULL;
+ POSIX_GUARD_RESULT(s2n_array_get(&conn->psk_params.psk_list, 0, (void**) &first_psk));
+ POSIX_ENSURE_REF(first_psk);
+ *max_early_data_size = first_psk->early_data_config.max_early_data_size;
+
+ /* For the server, we should use the minimum of the limit retrieved from the ticket
+ * and the current limit being set for new tickets.
+ *
+ * This is defensive: even if more early data was previously allowed, the server may not be
+ * willing or able to handle that much early data now.
+ *
+ * We don't do this for external PSKs because the server has intentionally set the limit
+ * while setting up this connection, not during a previous connection.
+ */
+ if (conn->mode == S2N_SERVER && first_psk->type == S2N_PSK_TYPE_RESUMPTION) {
+ *max_early_data_size = MIN(*max_early_data_size, server_max_early_data_size);
+ }
+
+ return S2N_SUCCESS;
+}
+
+int s2n_config_set_early_data_cb(struct s2n_config *config, s2n_early_data_cb cb)
+{
+ POSIX_ENSURE_REF(config);
+ config->early_data_cb = cb;
+ return S2N_SUCCESS;
+}
+
+int s2n_offered_early_data_get_context_length(struct s2n_offered_early_data *early_data, uint16_t *context_len)
+{
+ POSIX_ENSURE_REF(context_len);
+ POSIX_ENSURE_REF(early_data);
+ struct s2n_connection *conn = early_data->conn;
+
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->psk_params.chosen_psk);
+ struct s2n_early_data_config *early_data_config = &conn->psk_params.chosen_psk->early_data_config;
+
+ *context_len = early_data_config->context.size;
+
+ return S2N_SUCCESS;
+}
+
+int s2n_offered_early_data_get_context(struct s2n_offered_early_data *early_data, uint8_t *context, uint16_t max_len)
+{
+ POSIX_ENSURE_REF(context);
+ POSIX_ENSURE_REF(early_data);
+ struct s2n_connection *conn = early_data->conn;
+
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->psk_params.chosen_psk);
+ struct s2n_early_data_config *early_data_config = &conn->psk_params.chosen_psk->early_data_config;
+
+ POSIX_ENSURE(early_data_config->context.size <= max_len, S2N_ERR_INSUFFICIENT_MEM_SIZE);
+ POSIX_CHECKED_MEMCPY(context, early_data_config->context.data, early_data_config->context.size);
+
+ return S2N_SUCCESS;
+}
+
+int s2n_offered_early_data_reject(struct s2n_offered_early_data *early_data)
+{
+ POSIX_ENSURE_REF(early_data);
+ struct s2n_connection *conn = early_data->conn;
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_REJECTED));
+ return S2N_SUCCESS;
+}
+
+int s2n_offered_early_data_accept(struct s2n_offered_early_data *early_data)
+{
+ POSIX_ENSURE_REF(early_data);
+ struct s2n_connection *conn = early_data->conn;
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_ACCEPTED));
+ return S2N_SUCCESS;
+}
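
The accept-or-reject logic in this new file defers the final decision to the application whenever a callback has been registered with s2n_config_set_early_data_cb(). The following is only a sketch of such a callback, built from the accessor and accept/reject functions added in this file; is_known_context() is a hypothetical application-side anti-replay check, not part of s2n, and config is assumed to be an existing struct s2n_config.

    /* Sketch only: is_known_context() is an assumed application helper for replay detection. */
    static int early_data_cb(struct s2n_connection *conn, struct s2n_offered_early_data *early_data)
    {
        (void) conn;
        uint16_t context_len = 0;
        uint8_t context[256] = { 0 };

        if (s2n_offered_early_data_get_context_length(early_data, &context_len) != S2N_SUCCESS
                || context_len > sizeof(context)
                || s2n_offered_early_data_get_context(early_data, context, sizeof(context)) != S2N_SUCCESS) {
            return s2n_offered_early_data_reject(early_data);
        }

        /* Reject contexts the application has already seen; accept everything else. */
        if (is_known_context(context, context_len)) {
            return s2n_offered_early_data_reject(early_data);
        }
        return s2n_offered_early_data_accept(early_data);
    }

    /* Registered once on the config: */
    s2n_config_set_early_data_cb(config, early_data_cb);

Because s2n_early_data_accept_or_reject() bails with S2N_ERR_ASYNC_BLOCKED until accept or reject is called, the same callback could instead stash the s2n_offered_early_data pointer and complete the decision later from another thread.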
diff --git a/contrib/restricted/aws/s2n/tls/s2n_early_data.h b/contrib/restricted/aws/s2n/tls/s2n_early_data.h
new file mode 100644
index 0000000000..eae7f84367
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_early_data.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "api/s2n.h"
+
+#include "tls/s2n_crypto_constants.h"
+#include "utils/s2n_blob.h"
+#include "utils/s2n_result.h"
+
+struct s2n_psk;
+
+typedef enum {
+ S2N_UNKNOWN_EARLY_DATA_STATE = 0,
+ S2N_EARLY_DATA_REQUESTED,
+ S2N_EARLY_DATA_NOT_REQUESTED,
+ S2N_EARLY_DATA_ACCEPTED,
+ S2N_EARLY_DATA_REJECTED,
+ S2N_END_OF_EARLY_DATA,
+ S2N_EARLY_DATA_STATES_COUNT
+} s2n_early_data_state;
+
+S2N_RESULT s2n_connection_set_early_data_state(struct s2n_connection *conn, s2n_early_data_state state);
+
+struct s2n_early_data_config {
+ uint32_t max_early_data_size;
+ uint8_t protocol_version;
+ struct s2n_cipher_suite *cipher_suite;
+ struct s2n_blob application_protocol;
+ struct s2n_blob context;
+};
+S2N_CLEANUP_RESULT s2n_early_data_config_free(struct s2n_early_data_config *config);
+S2N_RESULT s2n_early_data_config_clone(struct s2n_psk *new_psk, struct s2n_early_data_config *old_config);
+
+struct s2n_offered_early_data {
+ struct s2n_connection *conn;
+};
+
+bool s2n_early_data_is_valid_for_connection(struct s2n_connection *conn);
+S2N_RESULT s2n_early_data_accept_or_reject(struct s2n_connection *conn);
+
+S2N_RESULT s2n_early_data_get_server_max_size(struct s2n_connection *conn, uint32_t *max_early_data_size);
+
+S2N_RESULT s2n_early_data_record_bytes(struct s2n_connection *conn, ssize_t data_len);
+S2N_RESULT s2n_early_data_validate_send(struct s2n_connection *conn, uint32_t bytes_to_send);
+S2N_RESULT s2n_early_data_validate_recv(struct s2n_connection *conn);
+bool s2n_early_data_is_trial_decryption_allowed(struct s2n_connection *conn, uint8_t record_type);
+
+int s2n_connection_set_early_data_expected(struct s2n_connection *conn);
+int s2n_connection_set_end_of_early_data(struct s2n_connection *conn);
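
The per-PSK early data configuration declared here is populated through the setters defined in s2n_early_data.c. A hedged sketch of wiring it up for an external PSK follows; it assumes the existing external PSK API (s2n_external_psk_new, s2n_psk_set_identity, s2n_psk_set_secret, s2n_psk_set_hmac, s2n_connection_append_psk, s2n_psk_free) is available in this build, and secret/secret_len are placeholders.

    /* Sketch, not a drop-in snippet. */
    struct s2n_psk *psk = s2n_external_psk_new();
    s2n_psk_set_identity(psk, (const uint8_t *) "client-1", 8);
    s2n_psk_set_secret(psk, secret, secret_len);
    /* The PSK HMAC must match the PRF of the cipher suite chosen for early data. */
    s2n_psk_set_hmac(psk, S2N_PSK_HMAC_SHA256);
    /* Allow up to 1024 bytes of early data under TLS_AES_128_GCM_SHA256 (0x13, 0x01). */
    s2n_psk_configure_early_data(psk, 1024, 0x13, 0x01);
    s2n_psk_set_application_protocol(psk, (const uint8_t *) "h2", 2);
    s2n_connection_append_psk(conn, psk);
    s2n_psk_free(&psk);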
diff --git a/contrib/restricted/aws/s2n/tls/s2n_early_data_io.c b/contrib/restricted/aws/s2n/tls/s2n_early_data_io.c
new file mode 100644
index 0000000000..1f3d9a9750
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_early_data_io.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include <sys/param.h>
+
+#include "tls/s2n_early_data.h"
+
+#include "tls/s2n_connection.h"
+#include "utils/s2n_safety.h"
+#include "utils/s2n_mem.h"
+
+int s2n_end_of_early_data_send(struct s2n_connection *conn)
+{
+ if (conn->early_data_expected) {
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->handshake.io));
+ POSIX_BAIL(S2N_ERR_EARLY_DATA_BLOCKED);
+ }
+
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_END_OF_EARLY_DATA));
+ return S2N_SUCCESS;
+}
+
+int s2n_end_of_early_data_recv(struct s2n_connection *conn)
+{
+ POSIX_ENSURE(!s2n_connection_is_quic_enabled(conn), S2N_ERR_BAD_MESSAGE);
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_END_OF_EARLY_DATA));
+ return S2N_SUCCESS;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# If the client attempts a 0-RTT handshake but the server
+ *# rejects it, the server will generally not have the 0-RTT record
+ *# protection keys and must instead use trial decryption (either with
+ *# the 1-RTT handshake keys or by looking for a cleartext ClientHello in
+ *# the case of a HelloRetryRequest) to find the first non-0-RTT message.
+ */
+bool s2n_early_data_is_trial_decryption_allowed(struct s2n_connection *conn, uint8_t record_type)
+{
+ return conn && (conn->early_data_state == S2N_EARLY_DATA_REJECTED)
+ && record_type == TLS_APPLICATION_DATA
+ /* Only servers receive early data. */
+ && (conn->mode == S2N_SERVER)
+ /* Early data is only expected during the handshake. */
+ && (s2n_conn_get_current_message_type(conn) != APPLICATION_DATA);
+}
+
+static bool s2n_is_early_data_io(struct s2n_connection *conn)
+{
+ if (s2n_conn_get_current_message_type(conn) == APPLICATION_DATA) {
+ return false;
+ }
+
+ /* It would be more accurate to not include this check.
+ * However, before the early data feature was added, s2n_send and s2n_recv
+ * did not verify that they were being called after a complete handshake.
+ * Enforcing that broke several S2N tests, and might have broken customers too.
+ *
+ * Therefore, only consider this early data if the customer has indicated that
+ * they are aware of early data, either because early data is currently expected
+ * or early data is in a state that indicates that early data was previously expected.
+ */
+ if (conn->early_data_expected
+ || (conn->mode == S2N_CLIENT && conn->early_data_state == S2N_EARLY_DATA_REQUESTED)
+ || conn->early_data_state == S2N_EARLY_DATA_ACCEPTED
+ || conn->early_data_state == S2N_END_OF_EARLY_DATA) {
+ return true;
+ }
+ return false;
+}
+
+S2N_RESULT s2n_early_data_record_bytes(struct s2n_connection *conn, ssize_t data_len)
+{
+ RESULT_ENSURE_REF(conn);
+ if (!s2n_is_early_data_io(conn)) {
+ return S2N_RESULT_OK;
+ }
+
+ /* Ensure the bytes read are within the bounds of what we can actually record. */
+ if (data_len > (UINT64_MAX - conn->early_data_bytes)) {
+ conn->early_data_bytes = UINT64_MAX;
+ RESULT_BAIL(S2N_ERR_INTEGER_OVERFLOW);
+ }
+
+ /* Record the early data bytes read, even if they exceed the max_early_data_size.
+ * This will ensure that if this method is called again, it will fail again:
+ * Once we receive too many bytes, we can't proceed with the connection. */
+ conn->early_data_bytes += data_len;
+
+ uint32_t max_early_data_size = 0;
+ RESULT_GUARD_POSIX(s2n_connection_get_max_early_data_size(conn, &max_early_data_size));
+ RESULT_ENSURE(conn->early_data_bytes <= max_early_data_size, S2N_ERR_MAX_EARLY_DATA_SIZE);
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_early_data_validate_send(struct s2n_connection *conn, uint32_t bytes_to_send)
+{
+ RESULT_ENSURE_REF(conn);
+ if (!s2n_is_early_data_io(conn)) {
+ return S2N_RESULT_OK;
+ }
+
+ RESULT_ENSURE(conn->early_data_expected, S2N_ERR_EARLY_DATA_NOT_ALLOWED);
+ RESULT_ENSURE(conn->mode == S2N_CLIENT, S2N_ERR_EARLY_DATA_NOT_ALLOWED);
+ RESULT_ENSURE(conn->early_data_state == S2N_EARLY_DATA_REQUESTED
+ || conn->early_data_state == S2N_EARLY_DATA_ACCEPTED, S2N_ERR_EARLY_DATA_NOT_ALLOWED);
+
+ uint32_t allowed_early_data_size = 0;
+ RESULT_GUARD_POSIX(s2n_connection_get_remaining_early_data_size(conn, &allowed_early_data_size));
+ RESULT_ENSURE(bytes_to_send <= allowed_early_data_size, S2N_ERR_MAX_EARLY_DATA_SIZE);
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_early_data_validate_recv(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ if (!s2n_is_early_data_io(conn)) {
+ return S2N_RESULT_OK;
+ }
+
+ RESULT_ENSURE(conn->early_data_expected, S2N_ERR_EARLY_DATA_NOT_ALLOWED);
+ RESULT_ENSURE(conn->mode == S2N_SERVER, S2N_ERR_EARLY_DATA_NOT_ALLOWED);
+ RESULT_ENSURE(conn->early_data_state == S2N_EARLY_DATA_ACCEPTED, S2N_ERR_EARLY_DATA_NOT_ALLOWED);
+ RESULT_ENSURE(s2n_conn_get_current_message_type(conn) == END_OF_EARLY_DATA, S2N_ERR_EARLY_DATA_NOT_ALLOWED);
+ return S2N_RESULT_OK;
+}
+
+static bool s2n_early_data_can_continue(struct s2n_connection *conn)
+{
+ uint32_t remaining_early_data_size = 0;
+ return s2n_connection_get_remaining_early_data_size(conn, &remaining_early_data_size) >= S2N_SUCCESS
+ && remaining_early_data_size > 0;
+}
+
+S2N_RESULT s2n_send_early_data_impl(struct s2n_connection *conn, const uint8_t *data, ssize_t data_len,
+ ssize_t *data_sent, s2n_blocked_status *blocked)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(blocked);
+ *blocked = S2N_NOT_BLOCKED;
+ RESULT_ENSURE_REF(data_sent);
+ *data_sent = 0;
+
+ RESULT_ENSURE(conn->mode == S2N_CLIENT, S2N_ERR_SERVER_MODE);
+ RESULT_ENSURE(s2n_connection_supports_tls13(conn), S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
+
+ if (!s2n_early_data_can_continue(conn)) {
+ return S2N_RESULT_OK;
+ }
+
+ /* Attempt to make progress in the handshake even if s2n_send eventually fails.
+ * We only care about the result of this call if it would prevent us from calling s2n_send. */
+ int negotiate_result = s2n_negotiate(conn, blocked);
+ if (negotiate_result < S2N_SUCCESS) {
+ if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) {
+ return S2N_RESULT_ERROR;
+ } else if (*blocked != S2N_BLOCKED_ON_EARLY_DATA && *blocked != S2N_BLOCKED_ON_READ) {
+ return S2N_RESULT_ERROR;
+ }
+ }
+ /* Save the error status for later */
+ int negotiate_error = s2n_errno;
+ s2n_blocked_status negotiate_blocked = *blocked;
+
+ /* Attempt to send the early data.
+ * We only care about the result of this call if it fails. */
+ uint32_t early_data_to_send = 0;
+ RESULT_GUARD_POSIX(s2n_connection_get_remaining_early_data_size(conn, &early_data_to_send));
+ early_data_to_send = MIN(data_len, early_data_to_send);
+ if (early_data_to_send) {
+ ssize_t send_result = s2n_send(conn, data, early_data_to_send, blocked);
+ RESULT_GUARD_POSIX(send_result);
+ *data_sent = send_result;
+ }
+ *blocked = S2N_NOT_BLOCKED;
+
+ /* Since the send was successful, report the result of the original negotiate call.
+ * If we got this far, the result must have been success or a blocking error. */
+ if (negotiate_result < S2N_SUCCESS) {
+ RESULT_ENSURE_EQ(s2n_error_get_type(negotiate_error), S2N_ERR_T_BLOCKED);
+ if (negotiate_blocked == S2N_BLOCKED_ON_EARLY_DATA) {
+ return S2N_RESULT_OK;
+ } else if (s2n_early_data_can_continue(conn)) {
+ *blocked = negotiate_blocked;
+ RESULT_BAIL(negotiate_error);
+ } else {
+ return S2N_RESULT_OK;
+ }
+ }
+ return S2N_RESULT_OK;
+}
+
+int s2n_send_early_data(struct s2n_connection *conn, const uint8_t *data, ssize_t data_len,
+ ssize_t *data_sent, s2n_blocked_status *blocked)
+{
+ POSIX_ENSURE_REF(conn);
+
+ /* Calling this method indicates that we expect early data. */
+ POSIX_GUARD(s2n_connection_set_early_data_expected(conn));
+
+ s2n_result result = s2n_send_early_data_impl(conn, data, data_len, data_sent, blocked);
+
+ /* Unless s2n_send_early_data is called again (undoing this), we are done sending early data.
+ * If s2n_negotiate is called next, we could send the EndOfEarlyData message. */
+ POSIX_GUARD(s2n_connection_set_end_of_early_data(conn));
+
+ POSIX_GUARD_RESULT(result);
+ return S2N_SUCCESS;
+}
+
+S2N_RESULT s2n_recv_early_data_impl(struct s2n_connection *conn, uint8_t *data, ssize_t max_data_len,
+ ssize_t *data_received, s2n_blocked_status *blocked)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(blocked);
+ *blocked = S2N_NOT_BLOCKED;
+ RESULT_ENSURE_REF(data_received);
+ *data_received = 0;
+
+ RESULT_ENSURE(conn->mode == S2N_SERVER, S2N_ERR_CLIENT_MODE);
+
+ if (!s2n_early_data_can_continue(conn)) {
+ return S2N_RESULT_OK;
+ }
+
+ while(s2n_negotiate(conn, blocked) < S2N_SUCCESS) {
+ if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) {
+ return S2N_RESULT_ERROR;
+ } else if (max_data_len <= *data_received) {
+ return S2N_RESULT_ERROR;
+ } else if (*blocked != S2N_BLOCKED_ON_EARLY_DATA) {
+ if (s2n_early_data_can_continue(conn)) {
+ return S2N_RESULT_ERROR;
+ } else {
+ *blocked = S2N_NOT_BLOCKED;
+ return S2N_RESULT_OK;
+ }
+ }
+
+ ssize_t recv_result = s2n_recv(conn, data + *data_received,
+ max_data_len - *data_received, blocked);
+ RESULT_GUARD_POSIX(recv_result);
+ *data_received += recv_result;
+ }
+ return S2N_RESULT_OK;
+}
+
+int s2n_recv_early_data(struct s2n_connection *conn, uint8_t *data, ssize_t max_data_len,
+ ssize_t *data_received, s2n_blocked_status *blocked)
+{
+ /* Calling this method indicates that we expect early data. */
+ POSIX_GUARD(s2n_connection_set_early_data_expected(conn));
+
+ s2n_result result = s2n_recv_early_data_impl(conn, data, max_data_len, data_received, blocked);
+
+ /* Unless s2n_recv_early_data is called again (undoing this), we are done accepting early data. */
+ POSIX_GUARD(s2n_connection_set_end_of_early_data(conn));
+
+ POSIX_GUARD_RESULT(result);
+ return S2N_SUCCESS;
+}
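
On the client side, the I/O wrappers above interleave s2n_negotiate with s2n_send so early data can flow before the handshake completes. A rough usage sketch (error handling elided; request is placeholder application data and conn is an established client connection with an early-data-capable PSK):

    s2n_blocked_status blocked = S2N_NOT_BLOCKED;
    ssize_t sent = 0;
    uint8_t request[] = "GET / HTTP/1.1\r\n\r\n";

    /* May send fewer bytes than requested if the server's max_early_data_size is smaller. */
    if (s2n_send_early_data(conn, request, sizeof(request) - 1, &sent, &blocked) != S2N_SUCCESS
            && s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) {
        /* handle hard failure */
    }

    /* Complete the handshake; the EndOfEarlyData message is sent automatically when required. */
    while (s2n_negotiate(conn, &blocked) != S2N_SUCCESS) {
        if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) {
            /* handle hard failure */
            break;
        }
    }

The server-side counterpart, s2n_recv_early_data, follows the same pattern with a receive buffer and reports how many early data bytes were accepted.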
diff --git a/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.c b/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.c
index 12db371abb..f1632a0a53 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "tls/s2n_ecc_preferences.h"
#include "tls/s2n_connection.h"
@@ -39,6 +39,10 @@ const struct s2n_ecc_named_curve *const s2n_ecc_pref_list_20201021[] = {
&s2n_ecc_curve_secp521r1,
};
+const struct s2n_ecc_named_curve *const s2n_ecc_pref_list_20210816[] = {
+ &s2n_ecc_curve_secp384r1,
+};
+
const struct s2n_ecc_named_curve *const s2n_ecc_pref_list_test_all[] = {
#if EVP_APIS_SUPPORTED
&s2n_ecc_curve_x25519,
@@ -63,6 +67,11 @@ const struct s2n_ecc_preferences s2n_ecc_preferences_20201021 = {
.ecc_curves = s2n_ecc_pref_list_20201021,
};
+const struct s2n_ecc_preferences s2n_ecc_preferences_20210816 = {
+ .count = s2n_array_len(s2n_ecc_pref_list_20210816),
+ .ecc_curves = s2n_ecc_pref_list_20210816,
+};
+
const struct s2n_ecc_preferences s2n_ecc_preferences_test_all = {
.count = s2n_array_len(s2n_ecc_pref_list_test_all),
.ecc_curves = s2n_ecc_pref_list_test_all,
@@ -80,7 +89,7 @@ int s2n_check_ecc_preferences_curves_list(const struct s2n_ecc_preferences *ecc_
for (int i = 0; i < ecc_preferences->count; i++) {
const struct s2n_ecc_named_curve *named_curve = ecc_preferences->ecc_curves[i];
int curve_found = 0;
- for (int j = 0; j < s2n_all_supported_curves_list_len; j++) {
+ for (size_t j = 0; j < s2n_all_supported_curves_list_len; j++) {
if (named_curve->iana_id == s2n_all_supported_curves_list[j]->iana_id) {
curve_found = 1;
break;
@@ -88,7 +97,7 @@ int s2n_check_ecc_preferences_curves_list(const struct s2n_ecc_preferences *ecc_
}
check *= curve_found;
if (check == 0) {
- S2N_ERROR(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
+ POSIX_BAIL(S2N_ERR_ECDHE_UNSUPPORTED_CURVE);
}
}
return S2N_SUCCESS;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.h b/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.h
index 96afa8051a..564d722726 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_ecc_preferences.h
@@ -15,7 +15,7 @@
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include <strings.h>
#include <stdbool.h>
@@ -28,6 +28,7 @@ struct s2n_ecc_preferences {
extern const struct s2n_ecc_preferences s2n_ecc_preferences_20140601;
extern const struct s2n_ecc_preferences s2n_ecc_preferences_20200310;
extern const struct s2n_ecc_preferences s2n_ecc_preferences_20201021;
+extern const struct s2n_ecc_preferences s2n_ecc_preferences_20210816;
extern const struct s2n_ecc_preferences s2n_ecc_preferences_test_all;
extern const struct s2n_ecc_preferences s2n_ecc_preferences_null;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_encrypted_extensions.c b/contrib/restricted/aws/s2n/tls/s2n_encrypted_extensions.c
index d61d7c1b2d..9f47acd8fd 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_encrypted_extensions.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_encrypted_extensions.c
@@ -36,20 +36,20 @@
int s2n_encrypted_extensions_send(struct s2n_connection *conn)
{
- notnull_check(conn);
- ENSURE_POSIX(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_BAD_MESSAGE);
struct s2n_stuffer *out = &conn->handshake.io;
- GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_ENCRYPTED_EXTENSIONS, conn, out));
+ POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_ENCRYPTED_EXTENSIONS, conn, out));
return S2N_SUCCESS;
}
int s2n_encrypted_extensions_recv(struct s2n_connection *conn)
{
- notnull_check(conn);
- ENSURE_POSIX(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_BAD_MESSAGE);
struct s2n_stuffer *in = &conn->handshake.io;
- GUARD(s2n_extension_list_recv(S2N_EXTENSION_LIST_ENCRYPTED_EXTENSIONS, conn, in));
+ POSIX_GUARD(s2n_extension_list_recv(S2N_EXTENSION_LIST_ENCRYPTED_EXTENSIONS, conn, in));
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_establish_session.c b/contrib/restricted/aws/s2n/tls/s2n_establish_session.c
index e61b9f2850..18c4fcc8bf 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_establish_session.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_establish_session.c
@@ -14,7 +14,7 @@
*/
#include <stdint.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
@@ -32,24 +32,21 @@
* provided session ID in its cache. */
int s2n_establish_session(struct s2n_connection *conn)
{
- GUARD(s2n_conn_set_handshake_read_block(conn));
-
/* Start by receiving and processing the entire CLIENT_HELLO message */
if (!conn->handshake.client_hello_received) {
- GUARD(s2n_client_hello_recv(conn));
+ POSIX_GUARD(s2n_client_hello_recv(conn));
conn->handshake.client_hello_received = 1;
}
- GUARD(s2n_conn_set_handshake_type(conn));
+ POSIX_GUARD_RESULT(s2n_early_data_accept_or_reject(conn));
+ POSIX_GUARD(s2n_conn_set_handshake_type(conn));
if (conn->client_hello_version != S2N_SSLv2)
{
/* We've selected the parameters for the handshake, update the required hashes for this connection */
- GUARD(s2n_conn_update_required_handshake_hashes(conn));
+ POSIX_GUARD(s2n_conn_update_required_handshake_hashes(conn));
}
- GUARD(s2n_conn_clear_handshake_read_block(conn));
-
return 0;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_handshake.c b/contrib/restricted/aws/s2n/tls/s2n_handshake.c
index 922cd67c2b..eb46950ebe 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_handshake.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_handshake.c
@@ -32,13 +32,13 @@ int s2n_handshake_write_header(struct s2n_stuffer *out, uint8_t message_type)
S2N_ERROR_IF(s2n_stuffer_data_available(out), S2N_ERR_HANDSHAKE_STATE);
/* Write the message header */
- GUARD(s2n_stuffer_write_uint8(out, message_type));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, message_type));
/* Leave the length blank for now */
uint16_t length = 0;
- GUARD(s2n_stuffer_write_uint24(out, length));
+ POSIX_GUARD(s2n_stuffer_write_uint24(out, length));
- return 0;
+ return S2N_SUCCESS;
}
int s2n_handshake_finish_header(struct s2n_stuffer *out)
@@ -49,12 +49,12 @@ int s2n_handshake_finish_header(struct s2n_stuffer *out)
uint16_t payload = length - TLS_HANDSHAKE_HEADER_LENGTH;
/* Write the message header */
- GUARD(s2n_stuffer_rewrite(out));
- GUARD(s2n_stuffer_skip_write(out, 1));
- GUARD(s2n_stuffer_write_uint24(out, payload));
- GUARD(s2n_stuffer_skip_write(out, payload));
+ POSIX_GUARD(s2n_stuffer_rewrite(out));
+ POSIX_GUARD(s2n_stuffer_skip_write(out, 1));
+ POSIX_GUARD(s2n_stuffer_write_uint24(out, payload));
+ POSIX_GUARD(s2n_stuffer_skip_write(out, payload));
- return 0;
+ return S2N_SUCCESS;
}
int s2n_handshake_parse_header(struct s2n_connection *conn, uint8_t * message_type, uint32_t * length)
@@ -62,85 +62,73 @@ int s2n_handshake_parse_header(struct s2n_connection *conn, uint8_t * message_ty
S2N_ERROR_IF(s2n_stuffer_data_available(&conn->handshake.io) < TLS_HANDSHAKE_HEADER_LENGTH, S2N_ERR_SIZE_MISMATCH);
/* read the message header */
- GUARD(s2n_stuffer_read_uint8(&conn->handshake.io, message_type));
- GUARD(s2n_stuffer_read_uint24(&conn->handshake.io, length));
+ POSIX_GUARD(s2n_stuffer_read_uint8(&conn->handshake.io, message_type));
+ POSIX_GUARD(s2n_stuffer_read_uint24(&conn->handshake.io, length));
- return 0;
+ return S2N_SUCCESS;
}
static int s2n_handshake_get_hash_state_ptr(struct s2n_connection *conn, s2n_hash_algorithm hash_alg, struct s2n_hash_state **hash_state)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
switch (hash_alg) {
case S2N_HASH_MD5:
- *hash_state = &conn->handshake.md5;
+ *hash_state = &conn->handshake.hashes->md5;
break;
case S2N_HASH_SHA1:
- *hash_state = &conn->handshake.sha1;
+ *hash_state = &conn->handshake.hashes->sha1;
break;
case S2N_HASH_SHA224:
- *hash_state = &conn->handshake.sha224;
+ *hash_state = &conn->handshake.hashes->sha224;
break;
case S2N_HASH_SHA256:
- *hash_state = &conn->handshake.sha256;
+ *hash_state = &conn->handshake.hashes->sha256;
break;
case S2N_HASH_SHA384:
- *hash_state = &conn->handshake.sha384;
+ *hash_state = &conn->handshake.hashes->sha384;
break;
case S2N_HASH_SHA512:
- *hash_state = &conn->handshake.sha512;
+ *hash_state = &conn->handshake.hashes->sha512;
break;
case S2N_HASH_MD5_SHA1:
- *hash_state = &conn->handshake.md5_sha1;
+ *hash_state = &conn->handshake.hashes->md5_sha1;
break;
default:
- S2N_ERROR(S2N_ERR_HASH_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM);
break;
}
- return 0;
+ return S2N_SUCCESS;
}
-int s2n_handshake_reset_hash_state(struct s2n_connection *conn, s2n_hash_algorithm hash_alg)
+S2N_RESULT s2n_handshake_reset_hash_state(struct s2n_connection *conn, s2n_hash_algorithm hash_alg)
{
- struct s2n_hash_state *hash_state_ptr = NULL;
- GUARD(s2n_handshake_get_hash_state_ptr(conn, hash_alg, &hash_state_ptr));
-
- GUARD(s2n_hash_reset(hash_state_ptr));
-
- return 0;
+ struct s2n_hash_state *hash_state = NULL;
+ RESULT_GUARD_POSIX(s2n_handshake_get_hash_state_ptr(conn, hash_alg, &hash_state));
+ RESULT_GUARD_POSIX(s2n_hash_reset(hash_state));
+ return S2N_RESULT_OK;
}
-/* Copy the current hash state into the caller supplied pointer.
- * NOTE: If the underlying digest implementation is using the EVP API
- * then a pointer to the EVP ctx and md is copied. So you are actually
- * taking a reference, not a value.
- * Before using the hash_state returned by this function you must
- * use s2n_hash_copy() to avoid modifying the underlying value.
- */
-int s2n_handshake_get_hash_state(struct s2n_connection *conn, s2n_hash_algorithm hash_alg, struct s2n_hash_state *hash_state)
+S2N_RESULT s2n_handshake_copy_hash_state(struct s2n_connection *conn, s2n_hash_algorithm hash_alg, struct s2n_hash_state *copy)
{
- notnull_check(hash_state);
-
- struct s2n_hash_state *hash_state_ptr = NULL;
- GUARD(s2n_handshake_get_hash_state_ptr(conn, hash_alg, &hash_state_ptr));
-
- *hash_state = *hash_state_ptr;
-
- return 0;
+ struct s2n_hash_state *hash_state = NULL;
+ RESULT_GUARD_POSIX(s2n_handshake_get_hash_state_ptr(conn, hash_alg, &hash_state));
+ RESULT_GUARD_POSIX(s2n_hash_copy(copy, hash_state));
+ return S2N_RESULT_OK;
}
int s2n_handshake_require_all_hashes(struct s2n_handshake *handshake)
{
memset(handshake->required_hash_algs, 1, sizeof(handshake->required_hash_algs));
- return 0;
+ return S2N_SUCCESS;
}
static int s2n_handshake_require_hash(struct s2n_handshake *handshake, s2n_hash_algorithm hash_alg)
{
handshake->required_hash_algs[hash_alg] = 1;
- return 0;
+ return S2N_SUCCESS;
}
uint8_t s2n_handshake_is_hash_required(struct s2n_handshake *handshake, s2n_hash_algorithm hash_alg)
@@ -161,12 +149,12 @@ int s2n_conn_update_required_handshake_hashes(struct s2n_connection *conn)
message_type_t handshake_message = s2n_conn_get_current_message_type(conn);
const uint8_t client_cert_verify_done = (handshake_message >= CLIENT_CERT_VERIFY) ? 1 : 0;
s2n_cert_auth_type client_cert_auth_type;
- GUARD(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
+ POSIX_GUARD(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
/* If client authentication is possible, all hashes are needed until we're past CLIENT_CERT_VERIFY. */
if ((client_cert_auth_type != S2N_CERT_AUTH_NONE) && !client_cert_verify_done) {
- GUARD(s2n_handshake_require_all_hashes(&conn->handshake));
- return 0;
+ POSIX_GUARD(s2n_handshake_require_all_hashes(&conn->handshake));
+ return S2N_SUCCESS;
}
/* We don't need all of the hashes. Set the hash alg(s) required for the PRF */
@@ -174,8 +162,8 @@ int s2n_conn_update_required_handshake_hashes(struct s2n_connection *conn)
case S2N_SSLv3:
case S2N_TLS10:
case S2N_TLS11:
- GUARD(s2n_handshake_require_hash(&conn->handshake, S2N_HASH_MD5));
- GUARD(s2n_handshake_require_hash(&conn->handshake, S2N_HASH_SHA1));
+ POSIX_GUARD(s2n_handshake_require_hash(&conn->handshake, S2N_HASH_MD5));
+ POSIX_GUARD(s2n_handshake_require_hash(&conn->handshake, S2N_HASH_SHA1));
break;
case S2N_TLS12:
/* fall through */
@@ -184,13 +172,13 @@ int s2n_conn_update_required_handshake_hashes(struct s2n_connection *conn)
/* For TLS 1.2 and TLS 1.3, the cipher suite defines the PRF hash alg */
s2n_hmac_algorithm prf_alg = conn->secure.cipher_suite->prf_alg;
s2n_hash_algorithm hash_alg;
- GUARD(s2n_hmac_hash_alg(prf_alg, &hash_alg));
- GUARD(s2n_handshake_require_hash(&conn->handshake, hash_alg));
+ POSIX_GUARD(s2n_hmac_hash_alg(prf_alg, &hash_alg));
+ POSIX_GUARD(s2n_handshake_require_hash(&conn->handshake, hash_alg));
break;
}
}
- return 0;
+ return S2N_SUCCESS;
}
/*
@@ -216,20 +204,20 @@ int s2n_conn_update_required_handshake_hashes(struct s2n_connection *conn)
int s2n_create_wildcard_hostname(struct s2n_stuffer *hostname_stuffer, struct s2n_stuffer *output)
{
/* Find the end of the first label */
- GUARD(s2n_stuffer_skip_to_char(hostname_stuffer, '.'));
+ POSIX_GUARD(s2n_stuffer_skip_to_char(hostname_stuffer, '.'));
/* No first label found */
if (s2n_stuffer_data_available(hostname_stuffer) == 0) {
- return 0;
+ return S2N_SUCCESS;
}
/* Slap a single wildcard character to be the first label in output */
- GUARD(s2n_stuffer_write_uint8(output, '*'));
+ POSIX_GUARD(s2n_stuffer_write_uint8(output, '*'));
/* Simply copy the rest of the input to the output. */
- GUARD(s2n_stuffer_copy(hostname_stuffer, output, s2n_stuffer_data_available(hostname_stuffer)));
+ POSIX_GUARD(s2n_stuffer_copy(hostname_stuffer, output, s2n_stuffer_data_available(hostname_stuffer)));
- return 0;
+ return S2N_SUCCESS;
}
static int s2n_find_cert_matches(struct s2n_map *domain_name_to_cert_map,
@@ -239,7 +227,7 @@ static int s2n_find_cert_matches(struct s2n_map *domain_name_to_cert_map,
{
struct s2n_blob map_value;
bool key_found = false;
- GUARD_AS_POSIX(s2n_map_lookup(domain_name_to_cert_map, dns_name, &map_value, &key_found));
+ POSIX_GUARD_RESULT(s2n_map_lookup(domain_name_to_cert_map, dns_name, &map_value, &key_found));
if (key_found) {
struct certs_by_type *value = (void *) map_value.data;
for (int i = 0; i < S2N_CERT_TYPE_COUNT; i++) {
@@ -248,7 +236,7 @@ static int s2n_find_cert_matches(struct s2n_map *domain_name_to_cert_map,
*match_exists = 1;
}
- return 0;
+ return S2N_SUCCESS;
}
/* Find certificates that match the ServerName TLS extension sent by the client.
@@ -261,21 +249,21 @@ static int s2n_find_cert_matches(struct s2n_map *domain_name_to_cert_map,
int s2n_conn_find_name_matching_certs(struct s2n_connection *conn)
{
if (!s2n_server_received_server_name(conn)) {
- return 0;
+ return S2N_SUCCESS;
}
const char *name = conn->server_name;
struct s2n_blob hostname_blob = { .data = (uint8_t *) (uintptr_t) name, .size = strlen(name) };
- lte_check(hostname_blob.size, S2N_MAX_SERVER_NAME);
+ POSIX_ENSURE_LTE(hostname_blob.size, S2N_MAX_SERVER_NAME);
char normalized_hostname[S2N_MAX_SERVER_NAME + 1] = { 0 };
- memcpy_check(normalized_hostname, hostname_blob.data, hostname_blob.size);
+ POSIX_CHECKED_MEMCPY(normalized_hostname, hostname_blob.data, hostname_blob.size);
struct s2n_blob normalized_name = { .data = (uint8_t *) normalized_hostname, .size = hostname_blob.size };
- GUARD(s2n_blob_char_to_lower(&normalized_name));
+ POSIX_GUARD(s2n_blob_char_to_lower(&normalized_name));
struct s2n_stuffer normalized_hostname_stuffer;
- GUARD(s2n_stuffer_init(&normalized_hostname_stuffer, &normalized_name));
- GUARD(s2n_stuffer_skip_write(&normalized_hostname_stuffer, normalized_name.size));
+ POSIX_GUARD(s2n_stuffer_init(&normalized_hostname_stuffer, &normalized_name));
+ POSIX_GUARD(s2n_stuffer_skip_write(&normalized_hostname_stuffer, normalized_name.size));
/* Find the exact matches for the ServerName */
- GUARD(s2n_find_cert_matches(conn->config->domain_name_to_cert_map,
+ POSIX_GUARD(s2n_find_cert_matches(conn->config->domain_name_to_cert_map,
&normalized_name,
conn->handshake_params.exact_sni_matches,
&(conn->handshake_params.exact_sni_match_exists)));
@@ -285,18 +273,18 @@ int s2n_conn_find_name_matching_certs(struct s2n_connection *conn)
char wildcard_hostname[S2N_MAX_SERVER_NAME + 1] = { 0 };
struct s2n_blob wildcard_blob = { .data = (uint8_t *) wildcard_hostname, .size = sizeof(wildcard_hostname) };
struct s2n_stuffer wildcard_stuffer;
- GUARD(s2n_stuffer_init(&wildcard_stuffer, &wildcard_blob));
- GUARD(s2n_create_wildcard_hostname(&normalized_hostname_stuffer, &wildcard_stuffer));
+ POSIX_GUARD(s2n_stuffer_init(&wildcard_stuffer, &wildcard_blob));
+ POSIX_GUARD(s2n_create_wildcard_hostname(&normalized_hostname_stuffer, &wildcard_stuffer));
const uint32_t wildcard_len = s2n_stuffer_data_available(&wildcard_stuffer);
/* Couldn't create a valid wildcard from the input */
if (wildcard_len == 0) {
- return 0;
+ return S2N_SUCCESS;
}
/* The client's SNI is wildcardified, do an exact match against the set of server certs. */
wildcard_blob.size = wildcard_len;
- GUARD(s2n_find_cert_matches(conn->config->domain_name_to_cert_map,
+ POSIX_GUARD(s2n_find_cert_matches(conn->config->domain_name_to_cert_map,
&wildcard_blob,
conn->handshake_params.wc_sni_matches,
&(conn->handshake_params.wc_sni_match_exists)));
@@ -309,7 +297,7 @@ int s2n_conn_find_name_matching_certs(struct s2n_connection *conn)
|| conn->handshake_params.exact_sni_match_exists
|| conn->handshake_params.wc_sni_match_exists;
- return 0;
+ return S2N_SUCCESS;
}
/* Find the optimal certificate of a specific type.
@@ -329,3 +317,29 @@ struct s2n_cert_chain_and_key *s2n_get_compatible_cert_chain_and_key(struct s2n_
return conn->config->default_certs_by_type.certs[cert_type];
}
}
+
+/* This method will work when testing S2N, and for the EndOfEarlyData message.
+ *
+ * However, it will NOT work for arbitrary message types when potentially receiving records
+ * that contain multiple messages, like when talking to a non-S2N TLS implementation. If the "end_message"
+ * is not the first message in a multi-message record, negotiation will not stop.
+ * (This is not an issue for EndOfEarlyData because encryption and message order requirements force
+ * EndOfEarlyData to always be the first and only handshake message in its handshake record)
+ */
+S2N_RESULT s2n_negotiate_until_message(struct s2n_connection *conn, s2n_blocked_status *blocked, message_type_t end_message)
+{
+ RESULT_ENSURE_REF(conn);
+ conn->handshake.end_of_messages = end_message;
+ int r = s2n_negotiate(conn, blocked);
+ conn->handshake.end_of_messages = APPLICATION_DATA;
+ RESULT_GUARD_POSIX(r);
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_handshake_validate(const struct s2n_handshake *s2n_handshake)
+{
+ RESULT_ENSURE_REF(s2n_handshake);
+ RESULT_DEBUG_ENSURE(s2n_handshake->handshake_type < 256, S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(s2n_handshake->message_number >= 0 && s2n_handshake->message_number < 32, S2N_ERR_SAFETY);
+ return S2N_RESULT_OK;
+}
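
The negotiate-until-message helper added above lets internal callers pause the state machine at a specific handshake message. A hedged sketch of how it might be driven for early data (internal headers assumed; END_OF_EARLY_DATA is the enum value added in s2n_handshake.h and s2n_result_is_error comes from utils/s2n_result.h):

    s2n_blocked_status blocked = S2N_NOT_BLOCKED;
    /* Run the handshake only until the EndOfEarlyData message would be processed;
     * end_of_messages is restored to APPLICATION_DATA before the helper returns. */
    if (s2n_result_is_error(s2n_negotiate_until_message(conn, &blocked, END_OF_EARLY_DATA))) {
        /* blocked tells the caller whether to retry on read, write, or early data */
    }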
diff --git a/contrib/restricted/aws/s2n/tls/s2n_handshake.h b/contrib/restricted/aws/s2n/tls/s2n_handshake.h
index cb871889e4..5b73d1f626 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_handshake.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_handshake.h
@@ -16,9 +16,11 @@
#pragma once
#include <stdint.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "tls/s2n_crypto.h"
+#include "tls/s2n_handshake_hashes.h"
+#include "tls/s2n_handshake_type.h"
#include "tls/s2n_signature_algorithms.h"
#include "tls/s2n_tls_parameters.h"
@@ -32,6 +34,7 @@
#define TLS_CLIENT_HELLO 1
#define TLS_SERVER_HELLO 2
#define TLS_SERVER_NEW_SESSION_TICKET 4
+#define TLS_END_OF_EARLY_DATA 5
#define TLS_ENCRYPTED_EXTENSIONS 8
#define TLS_CERTIFICATE 11
#define TLS_SERVER_KEY 12
@@ -67,23 +70,33 @@ typedef enum {
ENCRYPTED_EXTENSIONS,
SERVER_CERT_VERIFY,
HELLO_RETRY_MSG,
+ END_OF_EARLY_DATA,
APPLICATION_DATA,
} message_type_t;
typedef enum {
S2N_ASYNC_NOT_INVOKED = 0,
- S2N_ASYNC_INVOKING_CALLBACK,
- S2N_ASYNC_INVOKED_WAITING,
- S2N_ASYNC_INVOKED_COMPLETE,
+ S2N_ASYNC_INVOKED,
+ S2N_ASYNC_COMPLETE,
} s2n_async_state;
struct s2n_handshake_parameters {
+ /* Public keys for server / client */
+ struct s2n_pkey server_public_key;
+ struct s2n_pkey client_public_key;
+ struct s2n_blob client_cert_chain;
+ s2n_pkey_type client_cert_pkey_type;
+
/* Signature/hash algorithm pairs offered by the client in the signature_algorithms extension */
struct s2n_sig_scheme_list client_sig_hash_algs;
+ /* Signature scheme chosen by the server */
+ struct s2n_signature_scheme conn_sig_scheme;
/* Signature/hash algorithm pairs offered by the server in the certificate request */
struct s2n_sig_scheme_list server_sig_hash_algs;
+ /* Signature scheme chosen by the client */
+ struct s2n_signature_scheme client_cert_sig_scheme;
/* The cert chain we will send the peer. */
struct s2n_cert_chain_and_key *our_chain_and_key;
@@ -114,28 +127,15 @@ struct s2n_handshake_parameters {
struct s2n_cert_chain_and_key *wc_sni_matches[S2N_CERT_TYPE_COUNT];
uint8_t exact_sni_match_exists;
uint8_t wc_sni_match_exists;
+
+ uint8_t client_random[S2N_TLS_RANDOM_DATA_LEN];
+ uint8_t server_random[S2N_TLS_RANDOM_DATA_LEN];
};
struct s2n_handshake {
struct s2n_stuffer io;
- struct s2n_hash_state md5;
- struct s2n_hash_state sha1;
- struct s2n_hash_state sha224;
- struct s2n_hash_state sha256;
- struct s2n_hash_state sha384;
- struct s2n_hash_state sha512;
- struct s2n_hash_state md5_sha1;
-
- /* A copy of the handshake messages hash used to validate the CertificateVerify message */
- struct s2n_hash_state ccv_hash_copy;
-
- /* Used for SSLv3, TLS 1.0, and TLS 1.1 PRFs */
- struct s2n_hash_state prf_md5_hash_copy;
- struct s2n_hash_state prf_sha1_hash_copy;
- /*Used for TLS 1.2 PRF */
- struct s2n_hash_state prf_tls12_hash_copy;
- struct s2n_hash_state server_finished_copy;
+ struct s2n_handshake_hashes *hashes;
/* Hash algorithms required for this handshake. The set of required hashes can be reduced as session parameters are
* negotiated, i.e. cipher suite and protocol version.
@@ -145,53 +145,23 @@ struct s2n_handshake {
uint8_t server_finished[S2N_TLS_SECRET_LEN];
uint8_t client_finished[S2N_TLS_SECRET_LEN];
- /* Handshake type is a bitset, with the following
- bit positions */
+ /* Which message-order affecting features are enabled */
uint32_t handshake_type;
-/* Has the handshake been negotiated yet? */
-#define INITIAL 0x00
-#define NEGOTIATED 0x01
-#define IS_NEGOTIATED( type ) ( (type) & NEGOTIATED )
-
-/* Handshake is a full handshake */
-#define FULL_HANDSHAKE 0x02
-#define IS_FULL_HANDSHAKE( type ) ( (type) & FULL_HANDSHAKE )
-#define IS_RESUMPTION_HANDSHAKE( type ) ( !IS_FULL_HANDSHAKE( (type) ) && IS_NEGOTIATED ( (type) ) )
-
-/* Handshake uses perfect forward secrecy */
-#define TLS12_PERFECT_FORWARD_SECRECY 0x04
-
-/* Handshake needs OCSP status message */
-#define OCSP_STATUS 0x08
-#define IS_OCSP_STAPLED( type ) ( ( (type) & OCSP_STATUS ) != 0 )
-
-/* Handshake should request a Client Certificate */
-#define CLIENT_AUTH 0x10
-#define IS_CLIENT_AUTH_HANDSHAKE( type ) ( (type) & CLIENT_AUTH )
-
-/* Session Resumption via session-tickets */
-#define WITH_SESSION_TICKET 0x20
-#define IS_ISSUING_NEW_SESSION_TICKET( type ) ( (type) & WITH_SESSION_TICKET )
-
-/* Handshake requested a Client Certificate but did not get one */
-#define NO_CLIENT_CERT 0x40
-#define IS_CLIENT_AUTH_NO_CERT( type ) ( IS_CLIENT_AUTH_HANDSHAKE( (type) ) && ( (type) & NO_CLIENT_CERT) )
-
-/* A HelloRetryRequest was needed to proceed with the handshake */
-#define HELLO_RETRY_REQUEST 0x80
-
-/* Disguise a TLS1.3 handshake as a TLS1.2 handshake for backwards compatibility
- * with some middleboxes: https://tools.ietf.org/html/rfc8446#appendix-D.4 */
-#define MIDDLEBOX_COMPAT 0x100
-#define IS_MIDDLEBOX_COMPAT_MODE( type ) ( (type) & MIDDLEBOX_COMPAT )
-
/* Which handshake message number are we processing */
int message_number;
+ /* Last message in the handshake. Unless using early data or testing,
+ * should always be APPLICATION_DATA. */
+ message_type_t end_of_messages;
+
/* State of the async pkey operation during handshake */
s2n_async_state async_state;
+ /* State of the async early data callback.
+ * If not initialized, then the callback has not been triggered yet. */
+ struct s2n_offered_early_data early_data_async_state;
+
/* Indicates the CLIENT_HELLO message has been completely received */
unsigned client_hello_received:1;
@@ -202,19 +172,28 @@ struct s2n_handshake {
unsigned rsa_failed:1;
};
-extern message_type_t s2n_conn_get_current_message_type(struct s2n_connection *conn);
-extern int s2n_conn_set_handshake_type(struct s2n_connection *conn);
-extern int s2n_conn_set_handshake_no_client_cert(struct s2n_connection *conn);
-extern int s2n_conn_set_handshake_read_block(struct s2n_connection *conn);
-extern int s2n_conn_clear_handshake_read_block(struct s2n_connection *conn);
-extern int s2n_handshake_require_all_hashes(struct s2n_handshake *handshake);
-extern uint8_t s2n_handshake_is_hash_required(struct s2n_handshake *handshake, s2n_hash_algorithm hash_alg);
-extern int s2n_conn_update_required_handshake_hashes(struct s2n_connection *conn);
-extern int s2n_handshake_get_hash_state(struct s2n_connection *conn, s2n_hash_algorithm hash_alg, struct s2n_hash_state *hash_state);
-extern int s2n_handshake_reset_hash_state(struct s2n_connection *conn, s2n_hash_algorithm hash_alg);
-extern int s2n_conn_find_name_matching_certs(struct s2n_connection *conn);
-extern int s2n_create_wildcard_hostname(struct s2n_stuffer *hostname, struct s2n_stuffer *output);
+/* Only used in our test cases. */
+message_type_t s2n_conn_get_current_message_type(struct s2n_connection *conn);
+
+/* s2n_handshake */
+int s2n_handshake_require_all_hashes(struct s2n_handshake *handshake);
+uint8_t s2n_handshake_is_hash_required(struct s2n_handshake *handshake, s2n_hash_algorithm hash_alg);
+int s2n_conn_update_required_handshake_hashes(struct s2n_connection *conn);
+S2N_RESULT s2n_handshake_copy_hash_state(struct s2n_connection *conn, s2n_hash_algorithm hash_alg, struct s2n_hash_state *hash_state);
+S2N_RESULT s2n_handshake_reset_hash_state(struct s2n_connection *conn, s2n_hash_algorithm hash_alg);
+int s2n_conn_find_name_matching_certs(struct s2n_connection *conn);
+int s2n_create_wildcard_hostname(struct s2n_stuffer *hostname, struct s2n_stuffer *output);
struct s2n_cert_chain_and_key *s2n_get_compatible_cert_chain_and_key(struct s2n_connection *conn, const s2n_pkey_type cert_type);
+S2N_RESULT s2n_negotiate_until_message(struct s2n_connection *conn, s2n_blocked_status *blocked, message_type_t end_message);
+S2N_RESULT s2n_handshake_validate(const struct s2n_handshake *s2n_handshake);
+
+/* s2n_handshake_io */
+int s2n_conn_set_handshake_type(struct s2n_connection *conn);
+int s2n_conn_set_handshake_no_client_cert(struct s2n_connection *conn);
+
+/* s2n_handshake_transcript */
int s2n_conn_update_handshake_hashes(struct s2n_connection *conn, struct s2n_blob *data);
+
+/* s2n_quic_support */
S2N_RESULT s2n_quic_read_handshake_message(struct s2n_connection *conn, uint8_t *message_type);
S2N_RESULT s2n_quic_write_handshake_message(struct s2n_connection *conn, struct s2n_blob *in);
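Editor's note: the regrouped declarations above also expose s2n_negotiate_until_message(), which lets internal tests drive the state machine to a specific message rather than all the way to APPLICATION_DATA. Below is a minimal sketch of how a test harness might call it; the header path, the helper name run_until_server_hello(), and the single-call flow (no retry on blocked I/O) are illustrative assumptions, not part of this change.

#include "tls/s2n_handshake.h" /* assumed location of s2n_negotiate_until_message() */

/* Sketch only: advance a connection's handshake up to SERVER_HELLO.
 * Real tests loop and retry while I/O is blocked. */
static S2N_RESULT run_until_server_hello(struct s2n_connection *conn)
{
    RESULT_ENSURE_REF(conn);
    s2n_blocked_status blocked = S2N_NOT_BLOCKED;
    RESULT_GUARD(s2n_negotiate_until_message(conn, &blocked, SERVER_HELLO));
    return S2N_RESULT_OK;
}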
diff --git a/contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.c b/contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.c
new file mode 100644
index 0000000000..c3c727af81
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "tls/s2n_handshake_hashes.h"
+
+#include "crypto/s2n_fips.h"
+#include "tls/s2n_connection.h"
+#include "utils/s2n_blob.h"
+#include "utils/s2n_mem.h"
+#include "utils/s2n_safety.h"
+
+static S2N_RESULT s2n_handshake_hashes_new_hashes(struct s2n_handshake_hashes *hashes)
+{
+ RESULT_ENSURE_REF(hashes);
+ RESULT_GUARD_POSIX(s2n_hash_new(&hashes->md5));
+ RESULT_GUARD_POSIX(s2n_hash_new(&hashes->sha1));
+ RESULT_GUARD_POSIX(s2n_hash_new(&hashes->sha224));
+ RESULT_GUARD_POSIX(s2n_hash_new(&hashes->sha256));
+ RESULT_GUARD_POSIX(s2n_hash_new(&hashes->sha384));
+ RESULT_GUARD_POSIX(s2n_hash_new(&hashes->sha512));
+ RESULT_GUARD_POSIX(s2n_hash_new(&hashes->md5_sha1));
+ RESULT_GUARD_POSIX(s2n_hash_new(&hashes->hash_workspace));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_handshake_hashes_reset_hashes(struct s2n_handshake_hashes *hashes)
+{
+ RESULT_ENSURE_REF(hashes);
+ RESULT_GUARD_POSIX(s2n_hash_reset(&hashes->md5));
+ RESULT_GUARD_POSIX(s2n_hash_reset(&hashes->sha1));
+ RESULT_GUARD_POSIX(s2n_hash_reset(&hashes->sha224));
+ RESULT_GUARD_POSIX(s2n_hash_reset(&hashes->sha256));
+ RESULT_GUARD_POSIX(s2n_hash_reset(&hashes->sha384));
+ RESULT_GUARD_POSIX(s2n_hash_reset(&hashes->sha512));
+ RESULT_GUARD_POSIX(s2n_hash_reset(&hashes->md5_sha1));
+ RESULT_GUARD_POSIX(s2n_hash_reset(&hashes->hash_workspace));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_handshake_hashes_free_hashes(struct s2n_handshake_hashes *hashes)
+{
+ if (!hashes) {
+ return S2N_RESULT_OK;
+ }
+ RESULT_GUARD_POSIX(s2n_hash_free(&hashes->md5));
+ RESULT_GUARD_POSIX(s2n_hash_free(&hashes->sha1));
+ RESULT_GUARD_POSIX(s2n_hash_free(&hashes->sha224));
+ RESULT_GUARD_POSIX(s2n_hash_free(&hashes->sha256));
+ RESULT_GUARD_POSIX(s2n_hash_free(&hashes->sha384));
+ RESULT_GUARD_POSIX(s2n_hash_free(&hashes->sha512));
+ RESULT_GUARD_POSIX(s2n_hash_free(&hashes->md5_sha1));
+ RESULT_GUARD_POSIX(s2n_hash_free(&hashes->hash_workspace));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_handshake_hashes_init_hashes(struct s2n_handshake_hashes *hashes)
+{
+ /* Allow MD5 for hash states that are used by the PRF. This is required
+ * to comply with the TLS 1.0 and 1.1 RFCs and is approved as per
+ * NIST Special Publication 800-52 Revision 1.
+ */
+ if (s2n_is_in_fips_mode()) {
+ RESULT_GUARD_POSIX(s2n_hash_allow_md5_for_fips(&hashes->md5));
+ RESULT_GUARD_POSIX(s2n_hash_allow_md5_for_fips(&hashes->hash_workspace));
+
+        /* Do not check s2n_hash_is_available before initialization. Allow MD5 and
+         * SHA-1 for both FIPS and non-FIPS mode. This is required to perform the
+         * signature checks in the CertificateVerify message in TLS 1.0 and TLS 1.1.
+         * This is approved per NIST SP 800-52r1. */
+ RESULT_GUARD_POSIX(s2n_hash_allow_md5_for_fips(&hashes->md5_sha1));
+ }
+
+ RESULT_GUARD_POSIX(s2n_hash_init(&hashes->md5, S2N_HASH_MD5));
+ RESULT_GUARD_POSIX(s2n_hash_init(&hashes->sha1, S2N_HASH_SHA1));
+ RESULT_GUARD_POSIX(s2n_hash_init(&hashes->sha224, S2N_HASH_SHA224));
+ RESULT_GUARD_POSIX(s2n_hash_init(&hashes->sha256, S2N_HASH_SHA256));
+ RESULT_GUARD_POSIX(s2n_hash_init(&hashes->sha384, S2N_HASH_SHA384));
+ RESULT_GUARD_POSIX(s2n_hash_init(&hashes->sha512, S2N_HASH_SHA512));
+ RESULT_GUARD_POSIX(s2n_hash_init(&hashes->md5_sha1, S2N_HASH_MD5_SHA1));
+ RESULT_GUARD_POSIX(s2n_hash_init(&hashes->hash_workspace, S2N_HASH_NONE));
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_handshake_hashes_new(struct s2n_handshake_hashes **hashes)
+{
+ RESULT_ENSURE_REF(hashes);
+ RESULT_ENSURE_EQ(*hashes, NULL);
+
+ DEFER_CLEANUP(struct s2n_blob data = { 0 }, s2n_free);
+ RESULT_GUARD_POSIX(s2n_realloc(&data, sizeof(struct s2n_handshake_hashes)));
+ RESULT_GUARD_POSIX(s2n_blob_zero(&data));
+ *hashes = (struct s2n_handshake_hashes*)(void*) data.data;
+ ZERO_TO_DISABLE_DEFER_CLEANUP(data);
+
+ RESULT_GUARD(s2n_handshake_hashes_new_hashes(*hashes));
+ RESULT_GUARD(s2n_handshake_hashes_init_hashes(*hashes));
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_handshake_hashes_wipe(struct s2n_handshake_hashes *hashes)
+{
+ RESULT_GUARD(s2n_handshake_hashes_reset_hashes(hashes));
+ return S2N_RESULT_OK;
+}
+
+S2N_CLEANUP_RESULT s2n_handshake_hashes_free(struct s2n_handshake_hashes **hashes)
+{
+ RESULT_ENSURE_REF(hashes);
+ RESULT_GUARD(s2n_handshake_hashes_free_hashes(*hashes));
+ RESULT_GUARD_POSIX(s2n_free_object((uint8_t**) hashes, sizeof(struct s2n_handshake_hashes)));
+ return S2N_RESULT_OK;
+}
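Editor's note: to make the intended lifecycle of the new object concrete, the sketch below shows how a caller might allocate, reuse, and release an s2n_handshake_hashes instance through the three functions defined in this file. It is illustrative only; the connection code in this change wires these calls up itself, and the transcript-update step is elided.

/* Illustrative lifecycle for the new s2n_handshake_hashes object. */
static S2N_RESULT handshake_hashes_lifecycle_example(void)
{
    struct s2n_handshake_hashes *hashes = NULL;

    /* One allocation backs all of the hash states (see s2n_handshake_hashes_new above). */
    RESULT_GUARD(s2n_handshake_hashes_new(&hashes));

    /* ... update hashes->sha256, hashes->md5_sha1, etc. with transcript data ... */

    /* Reset every hash state so the object can serve a fresh handshake. */
    RESULT_GUARD(s2n_handshake_hashes_wipe(hashes));

    /* Free the hash states and the single backing allocation. */
    RESULT_GUARD(s2n_handshake_hashes_free(&hashes));
    return S2N_RESULT_OK;
}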
diff --git a/contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.h b/contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.h
new file mode 100644
index 0000000000..22d157deb4
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_handshake_hashes.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "api/s2n.h"
+
+#include "crypto/s2n_hash.h"
+#include "crypto/s2n_tls13_keys.h"
+
+struct s2n_handshake_hashes {
+ struct s2n_hash_state md5;
+ struct s2n_hash_state sha1;
+ struct s2n_hash_state sha224;
+ struct s2n_hash_state sha256;
+ struct s2n_hash_state sha384;
+ struct s2n_hash_state sha512;
+ struct s2n_hash_state md5_sha1;
+
+ /* TLS1.3 requires transcript hash digests to calculate secrets.
+ */
+ uint8_t transcript_hash_digest[S2N_TLS13_SECRET_MAX_LEN];
+
+ /* To avoid allocating memory for hash objects, we reuse one temporary hash object.
+ * Do NOT rely on this hash state maintaining its value outside of the current context.
+ */
+ struct s2n_hash_state hash_workspace;
+};
+
+S2N_RESULT s2n_handshake_hashes_new(struct s2n_handshake_hashes **hashes);
+S2N_RESULT s2n_handshake_hashes_wipe(struct s2n_handshake_hashes *hashes);
+S2N_CLEANUP_RESULT s2n_handshake_hashes_free(struct s2n_handshake_hashes **hashes);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_handshake_io.c b/contrib/restricted/aws/s2n/tls/s2n_handshake_io.c
index 5b78ef79c6..3c490212a7 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_handshake_io.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_handshake_io.c
@@ -16,7 +16,7 @@
#include <sys/param.h>
#include <errno.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
@@ -31,14 +31,16 @@
#include "tls/s2n_tls.h"
#include "tls/s2n_tls13.h"
#include "tls/s2n_tls13_handshake.h"
+#include "tls/s2n_tls13_key_schedule.h"
#include "tls/s2n_kex.h"
+#include "tls/s2n_post_handshake.h"
#include "stuffer/s2n_stuffer.h"
#include "utils/s2n_safety.h"
#include "utils/s2n_socket.h"
#include "utils/s2n_random.h"
-#include "utils/s2n_str.h"
+#include "utils/s2n_bitmap.h"
/* clang-format off */
struct s2n_handshake_action {
@@ -51,13 +53,13 @@ struct s2n_handshake_action {
static int s2n_always_fail_send(struct s2n_connection *conn)
{
/* This state should never be sending a handshake message. */
- S2N_ERROR(S2N_ERR_HANDSHAKE_UNREACHABLE);
+ POSIX_BAIL(S2N_ERR_HANDSHAKE_UNREACHABLE);
}
static int s2n_always_fail_recv(struct s2n_connection *conn)
{
/* This state should never have an incoming handshake message. */
- S2N_ERROR(S2N_ERR_HANDSHAKE_UNREACHABLE);
+ POSIX_BAIL(S2N_ERR_HANDSHAKE_UNREACHABLE);
}
/* Client and Server handlers for each message type we support.
@@ -100,6 +102,7 @@ static struct s2n_handshake_action tls13_state_machine[] = {
[CLIENT_CERT] = {TLS_HANDSHAKE, TLS_CERTIFICATE, 'C', {s2n_client_cert_recv, s2n_client_cert_send}},
[CLIENT_CERT_VERIFY] = {TLS_HANDSHAKE, TLS_CERT_VERIFY, 'C', {s2n_tls13_cert_verify_recv, s2n_tls13_cert_verify_send}},
[CLIENT_FINISHED] = {TLS_HANDSHAKE, TLS_FINISHED, 'C', {s2n_tls13_client_finished_recv, s2n_tls13_client_finished_send}},
+ [END_OF_EARLY_DATA] = {TLS_HANDSHAKE, TLS_END_OF_EARLY_DATA, 'C', {s2n_end_of_early_data_recv, s2n_end_of_early_data_send}},
/* Not used by TLS1.3, except to maintain middlebox compatibility */
[CLIENT_CHANGE_CIPHER_SPEC] = {TLS_CHANGE_CIPHER_SPEC, 0, 'C', {s2n_basic_ccs_recv, s2n_ccs_send}},
@@ -129,12 +132,10 @@ static const char *message_names[] = {
MESSAGE_NAME_ENTRY(SERVER_CHANGE_CIPHER_SPEC),
MESSAGE_NAME_ENTRY(SERVER_FINISHED),
MESSAGE_NAME_ENTRY(HELLO_RETRY_MSG),
+ MESSAGE_NAME_ENTRY(END_OF_EARLY_DATA),
MESSAGE_NAME_ENTRY(APPLICATION_DATA),
};
-/* Maximum number of valid handshakes */
-#define S2N_HANDSHAKES_COUNT 512
-
/* Maximum number of messages in a handshake */
#define S2N_MAX_HANDSHAKE_LENGTH 32
@@ -366,11 +367,21 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
SERVER_HELLO
},
+ [INITIAL | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ SERVER_HELLO
+ },
+
[INITIAL | HELLO_RETRY_REQUEST] = {
CLIENT_HELLO,
HELLO_RETRY_MSG
},
+ [INITIAL | HELLO_RETRY_REQUEST | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ HELLO_RETRY_MSG
+ },
+
[NEGOTIATED] = {
CLIENT_HELLO,
SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_FINISHED,
@@ -378,6 +389,13 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
APPLICATION_DATA
},
+ [NEGOTIATED | WITH_EARLY_DATA] = {
+ CLIENT_HELLO,
+ SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_FINISHED,
+ END_OF_EARLY_DATA, CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
[NEGOTIATED | MIDDLEBOX_COMPAT] = {
CLIENT_HELLO,
SERVER_HELLO, SERVER_CHANGE_CIPHER_SPEC, ENCRYPTED_EXTENSIONS, SERVER_FINISHED,
@@ -385,6 +403,27 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
APPLICATION_DATA
},
+ [NEGOTIATED | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ SERVER_HELLO, SERVER_CHANGE_CIPHER_SPEC, ENCRYPTED_EXTENSIONS, SERVER_FINISHED,
+ CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
+ [NEGOTIATED | MIDDLEBOX_COMPAT | WITH_EARLY_DATA] = {
+ CLIENT_HELLO,
+ SERVER_HELLO, SERVER_CHANGE_CIPHER_SPEC, ENCRYPTED_EXTENSIONS, SERVER_FINISHED,
+ CLIENT_CHANGE_CIPHER_SPEC, END_OF_EARLY_DATA, CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
+ [NEGOTIATED | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS | WITH_EARLY_DATA] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ SERVER_HELLO, SERVER_CHANGE_CIPHER_SPEC, ENCRYPTED_EXTENSIONS, SERVER_FINISHED,
+ END_OF_EARLY_DATA, CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
[NEGOTIATED | HELLO_RETRY_REQUEST] = {
CLIENT_HELLO,
HELLO_RETRY_MSG,
@@ -403,6 +442,15 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
APPLICATION_DATA
},
+ [NEGOTIATED | HELLO_RETRY_REQUEST | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ HELLO_RETRY_MSG, SERVER_CHANGE_CIPHER_SPEC,
+ CLIENT_HELLO,
+ SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_FINISHED,
+ CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
[NEGOTIATED | FULL_HANDSHAKE] = {
CLIENT_HELLO,
SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
@@ -417,6 +465,13 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
APPLICATION_DATA
},
+ [NEGOTIATED | FULL_HANDSHAKE | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ SERVER_HELLO, SERVER_CHANGE_CIPHER_SPEC, ENCRYPTED_EXTENSIONS, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
+ CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
[NEGOTIATED | FULL_HANDSHAKE | HELLO_RETRY_REQUEST] = {
CLIENT_HELLO,
HELLO_RETRY_MSG,
@@ -435,6 +490,15 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
APPLICATION_DATA
},
+ [NEGOTIATED | FULL_HANDSHAKE | HELLO_RETRY_REQUEST | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ HELLO_RETRY_MSG, SERVER_CHANGE_CIPHER_SPEC,
+ CLIENT_HELLO,
+ SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
+ CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
[NEGOTIATED | FULL_HANDSHAKE | HELLO_RETRY_REQUEST | CLIENT_AUTH] = {
CLIENT_HELLO,
HELLO_RETRY_MSG,
@@ -453,6 +517,15 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
APPLICATION_DATA
},
+ [NEGOTIATED | FULL_HANDSHAKE | HELLO_RETRY_REQUEST | CLIENT_AUTH | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ HELLO_RETRY_MSG, SERVER_CHANGE_CIPHER_SPEC,
+ CLIENT_HELLO,
+ SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_CERT_REQ, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
+ CLIENT_CERT, CLIENT_CERT_VERIFY, CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
[NEGOTIATED | FULL_HANDSHAKE | HELLO_RETRY_REQUEST | CLIENT_AUTH | NO_CLIENT_CERT] = {
CLIENT_HELLO,
HELLO_RETRY_MSG,
@@ -471,6 +544,15 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
APPLICATION_DATA
},
+ [NEGOTIATED | FULL_HANDSHAKE | HELLO_RETRY_REQUEST | CLIENT_AUTH | NO_CLIENT_CERT | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ HELLO_RETRY_MSG, SERVER_CHANGE_CIPHER_SPEC,
+ CLIENT_HELLO,
+ SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_CERT_REQ, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
+ CLIENT_CERT, CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
[NEGOTIATED | FULL_HANDSHAKE | CLIENT_AUTH] = {
CLIENT_HELLO,
SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_CERT_REQ, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
@@ -485,6 +567,13 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
APPLICATION_DATA
},
+ [NEGOTIATED | FULL_HANDSHAKE | CLIENT_AUTH | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ SERVER_HELLO, SERVER_CHANGE_CIPHER_SPEC, ENCRYPTED_EXTENSIONS, SERVER_CERT_REQ, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
+ CLIENT_CERT, CLIENT_CERT_VERIFY, CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
+
[NEGOTIATED | FULL_HANDSHAKE | CLIENT_AUTH | NO_CLIENT_CERT] = {
CLIENT_HELLO,
SERVER_HELLO, ENCRYPTED_EXTENSIONS, SERVER_CERT_REQ, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
@@ -498,22 +587,38 @@ static message_type_t tls13_handshakes[S2N_HANDSHAKES_COUNT][S2N_MAX_HANDSHAKE_L
CLIENT_CHANGE_CIPHER_SPEC, CLIENT_CERT, CLIENT_FINISHED,
APPLICATION_DATA
},
+
+ [NEGOTIATED | FULL_HANDSHAKE | CLIENT_AUTH | NO_CLIENT_CERT | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS] = {
+ CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
+ SERVER_HELLO, SERVER_CHANGE_CIPHER_SPEC, ENCRYPTED_EXTENSIONS, SERVER_CERT_REQ, SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
+ CLIENT_CERT, CLIENT_FINISHED,
+ APPLICATION_DATA
+ },
};
/* clang-format on */
-#define MAX_HANDSHAKE_TYPE_LEN 152
+#define MAX_HANDSHAKE_TYPE_LEN 123
static char handshake_type_str[S2N_HANDSHAKES_COUNT][MAX_HANDSHAKE_TYPE_LEN] = {0};
-static const char* handshake_type_names[] = {
+static const char* tls12_handshake_type_names[] = {
"NEGOTIATED|",
"FULL_HANDSHAKE|",
+ "CLIENT_AUTH|",
+ "NO_CLIENT_CERT|",
"TLS12_PERFECT_FORWARD_SECRECY|",
"OCSP_STATUS|",
- "CLIENT_AUTH|",
"WITH_SESSION_TICKET|",
+};
+
+static const char* tls13_handshake_type_names[] = {
+ "NEGOTIATED|",
+ "FULL_HANDSHAKE|",
+ "CLIENT_AUTH|",
"NO_CLIENT_CERT|",
"HELLO_RETRY_REQUEST|",
"MIDDLEBOX_COMPAT|",
+ "WITH_EARLY_DATA|",
+ "EARLY_CLIENT_CCS|",
};
#define IS_TLS13_HANDSHAKE( conn ) ((conn)->actual_protocol_version == S2N_TLS13)
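Editor's note: the handshake_type bitset doubles as the index into the tls13_handshakes message-order table above, and the two name arrays render it for logging. The fragment below is a sketch of that lookup using one of the entries added in this change; the flag constants are assumed to be defined in s2n_handshake_type.h, which is not part of this hunk.

/* Illustrative only: index the message-order table with a handshake_type value. */
static const message_type_t *example_expected_order(void)
{
    uint32_t handshake_type = NEGOTIATED | FULL_HANDSHAKE | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS;
    return tls13_handshakes[handshake_type];
    /* -> CLIENT_HELLO, CLIENT_CHANGE_CIPHER_SPEC,
     *    SERVER_HELLO, SERVER_CHANGE_CIPHER_SPEC, ENCRYPTED_EXTENSIONS,
     *    SERVER_CERT, SERVER_CERT_VERIFY, SERVER_FINISHED,
     *    CLIENT_FINISHED, APPLICATION_DATA */
}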
@@ -533,7 +638,7 @@ static const char* handshake_type_names[] = {
#define CONNECTION_WRITER( conn ) (conn->mode == S2N_CLIENT ? 'C' : 'S')
#define CONNECTION_IS_WRITER( conn ) (ACTIVE_STATE(conn).writer == CONNECTION_WRITER(conn))
-/* Used in our test cases */
+/* Only used in our test cases. */
message_type_t s2n_conn_get_current_message_type(struct s2n_connection *conn)
{
return ACTIVE_MESSAGE(conn);
@@ -556,37 +661,37 @@ static int s2n_advance_message(struct s2n_connection *conn)
}
/* Set TCP_QUICKACK to avoid artificial delay during the handshake */
- GUARD(s2n_socket_quickack(conn));
+ POSIX_GUARD(s2n_socket_quickack(conn));
/* If optimized io hasn't been enabled or if the caller started out with a corked socket,
* we don't mess with it
*/
if (!conn->corked_io || s2n_socket_was_corked(conn)) {
- return 0;
+ return S2N_SUCCESS;
}
/* Are we changing I/O directions */
if (ACTIVE_STATE(conn).writer == previous_writer || ACTIVE_STATE(conn).writer == 'A') {
- return 0;
+ return S2N_SUCCESS;
}
/* We're the new writer */
if (ACTIVE_STATE(conn).writer == this_mode) {
if (s2n_connection_is_managed_corked(conn)) {
/* Set TCP_CORK/NOPUSH */
- GUARD(s2n_socket_write_cork(conn));
+ POSIX_GUARD(s2n_socket_write_cork(conn));
}
- return 0;
+ return S2N_SUCCESS;
}
/* We're the new reader, or we reached the "B" writer stage indicating that
we're at the application data stage - uncork the data */
if (s2n_connection_is_managed_corked(conn)) {
- GUARD(s2n_socket_write_uncork(conn));
+ POSIX_GUARD(s2n_socket_write_uncork(conn));
}
- return 0;
+ return S2N_SUCCESS;
}
int s2n_generate_new_client_session_id(struct s2n_connection *conn)
@@ -595,104 +700,149 @@ int s2n_generate_new_client_session_id(struct s2n_connection *conn)
struct s2n_blob session_id = { .data = conn->session_id, .size = S2N_TLS_SESSION_ID_MAX_LEN };
/* Generate a new session id */
- GUARD_AS_POSIX(s2n_get_public_random_data(&session_id));
+ POSIX_GUARD_RESULT(s2n_get_public_random_data(&session_id));
conn->session_id_len = S2N_TLS_SESSION_ID_MAX_LEN;
}
- return 0;
+ return S2N_SUCCESS;
}
/* Lets the server flag whether a HelloRetryRequest is needed while processing extensions */
int s2n_set_hello_retry_required(struct s2n_connection *conn)
{
- notnull_check(conn);
-
- ENSURE_POSIX(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_INVALID_HELLO_RETRY);
- conn->handshake.handshake_type |= HELLO_RETRY_REQUEST;
+ POSIX_ENSURE_REF(conn);
+
+ POSIX_ENSURE(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_INVALID_HELLO_RETRY);
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_tls13_flag(conn, HELLO_RETRY_REQUEST));
+
+ /* HelloRetryRequests also indicate rejection of early data.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# A server which receives an "early_data" extension MUST behave in one
+ *# of three ways:
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# - Request that the client send another ClientHello by responding
+ *# with a HelloRetryRequest.
+ **/
+ if (conn->early_data_state == S2N_EARLY_DATA_REQUESTED) {
+ POSIX_GUARD_RESULT(s2n_connection_set_early_data_state(conn, S2N_EARLY_DATA_REJECTED));
+ }
return S2N_SUCCESS;
}
bool s2n_is_hello_retry_message(struct s2n_connection *conn)
{
- return (ACTIVE_MESSAGE(conn) == HELLO_RETRY_MSG);
+ return (conn != NULL &&
+ s2n_result_is_ok(s2n_handshake_validate(&(conn->handshake))) &&
+ ACTIVE_MESSAGE(conn) == HELLO_RETRY_MSG);
}
bool s2n_is_hello_retry_handshake(struct s2n_connection *conn)
{
- return conn->handshake.handshake_type & HELLO_RETRY_REQUEST;
+ return IS_HELLO_RETRY_HANDSHAKE(conn);
}
static S2N_RESULT s2n_conn_set_tls13_handshake_type(struct s2n_connection *conn) {
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn);
- if (conn->handshake.handshake_type & HELLO_RETRY_REQUEST) {
- conn->handshake.handshake_type = HELLO_RETRY_REQUEST;
- } else {
- conn->handshake.handshake_type = INITIAL;
- }
+ /* Most handshake type flags should be reset before we calculate the handshake type,
+ * in order to handle changes during retries.
+ * However, flags that have already affected the message order must be kept to avoid
+ * rewriting the past.
+ */
+ conn->handshake.handshake_type &= (HELLO_RETRY_REQUEST | MIDDLEBOX_COMPAT | EARLY_CLIENT_CCS);
/* A handshake type has been negotiated */
- conn->handshake.handshake_type |= NEGOTIATED;
+ RESULT_GUARD(s2n_handshake_type_set_flag(conn, NEGOTIATED));
if (conn->psk_params.chosen_psk == NULL) {
- conn->handshake.handshake_type |= FULL_HANDSHAKE;
+ RESULT_GUARD(s2n_handshake_type_set_flag(conn, FULL_HANDSHAKE));
+ }
+
+ if (conn->early_data_state == S2N_EARLY_DATA_ACCEPTED) {
+ conn->handshake.handshake_type |= WITH_EARLY_DATA;
}
s2n_cert_auth_type client_cert_auth_type;
- GUARD_AS_RESULT(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
+ RESULT_GUARD_POSIX(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
if (conn->mode == S2N_CLIENT && client_cert_auth_type == S2N_CERT_AUTH_REQUIRED
- && conn->handshake.handshake_type & FULL_HANDSHAKE) {
+ && IS_FULL_HANDSHAKE(conn)) {
/* If we're a client, and Client Auth is REQUIRED, then the Client must expect the CLIENT_CERT_REQ Message */
- conn->handshake.handshake_type |= CLIENT_AUTH;
+ RESULT_GUARD(s2n_handshake_type_set_flag(conn, CLIENT_AUTH));
} else if (conn->mode == S2N_SERVER && client_cert_auth_type != S2N_CERT_AUTH_NONE
- && conn->handshake.handshake_type & FULL_HANDSHAKE) {
+ && IS_FULL_HANDSHAKE(conn)) {
/* If we're a server, and Client Auth is REQUIRED or OPTIONAL, then the server must send the CLIENT_CERT_REQ Message*/
- conn->handshake.handshake_type |= CLIENT_AUTH;
+ RESULT_GUARD(s2n_handshake_type_set_flag(conn, CLIENT_AUTH));
}
- /* Use middlebox compatibility mode for TLS1.3 by default.
- * For now, only disable it when QUIC support is enabled. */
- if (!conn->config->quic_enabled) {
- conn->handshake.handshake_type |= MIDDLEBOX_COMPAT;
+ if (s2n_is_middlebox_compat_enabled(conn)) {
+ RESULT_GUARD(s2n_handshake_type_set_tls13_flag(conn, MIDDLEBOX_COMPAT));
}
return S2N_RESULT_OK;
}
+static S2N_RESULT s2n_validate_ems_status(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+
+ s2n_extension_type_id ems_ext_id = 0;
+ RESULT_GUARD_POSIX(s2n_extension_supported_iana_value_to_id(TLS_EXTENSION_EMS, &ems_ext_id));
+ bool ems_extension_recv = S2N_CBIT_TEST(conn->extension_requests_received, ems_ext_id);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc7627#section-5.3
+ *# If the original session used the "extended_master_secret"
+ *# extension but the new ClientHello does not contain it, the server
+ *# MUST abort the abbreviated handshake.
+ **/
+ if (conn->ems_negotiated) {
+ RESULT_ENSURE(ems_extension_recv, S2N_ERR_MISSING_EXTENSION);
+ }
+
+ /* Since we're discarding the resumption ticket, ignore EMS value from the ticket */
+ conn->ems_negotiated = ems_extension_recv;
+
+ return S2N_RESULT_OK;
+}
+
int s2n_conn_set_handshake_type(struct s2n_connection *conn)
{
if (IS_TLS13_HANDSHAKE(conn)) {
- GUARD_AS_POSIX(s2n_conn_set_tls13_handshake_type(conn));
+ POSIX_GUARD_RESULT(s2n_conn_set_tls13_handshake_type(conn));
return S2N_SUCCESS;
}
- S2N_ERROR_IF(conn->handshake.handshake_type & HELLO_RETRY_REQUEST, S2N_ERR_INVALID_HELLO_RETRY);
+ POSIX_GUARD_RESULT(s2n_handshake_type_reset(conn));
/* A handshake type has been negotiated */
- conn->handshake.handshake_type = NEGOTIATED;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_flag(conn, NEGOTIATED));
s2n_cert_auth_type client_cert_auth_type;
- GUARD(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
+ POSIX_GUARD(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
if (conn->mode == S2N_CLIENT && client_cert_auth_type == S2N_CERT_AUTH_REQUIRED) {
/* If we're a client, and Client Auth is REQUIRED, then the Client must expect the CLIENT_CERT_REQ Message */
- conn->handshake.handshake_type |= CLIENT_AUTH;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_flag(conn, CLIENT_AUTH));
} else if (conn->mode == S2N_SERVER && client_cert_auth_type != S2N_CERT_AUTH_NONE) {
/* If we're a server, and Client Auth is REQUIRED or OPTIONAL, then the server must send the CLIENT_CERT_REQ Message*/
- conn->handshake.handshake_type |= CLIENT_AUTH;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_flag(conn, CLIENT_AUTH));
}
if (conn->config->use_tickets) {
if (conn->session_ticket_status == S2N_DECRYPT_TICKET) {
- if (!s2n_decrypt_session_ticket(conn)) {
- return 0;
+ if (s2n_decrypt_session_ticket(conn, &conn->client_ticket_to_decrypt) == S2N_SUCCESS) {
+ return S2N_SUCCESS;
}
+ POSIX_GUARD_RESULT(s2n_validate_ems_status(conn));
+
if (s2n_config_is_encrypt_decrypt_key_available(conn->config) == 1) {
conn->session_ticket_status = S2N_NEW_TICKET;
- conn->handshake.handshake_type |= WITH_SESSION_TICKET;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_tls12_flag(conn, WITH_SESSION_TICKET));
}
/* If a session ticket is presented by the client, then skip lookup in Session ID server cache */
@@ -700,7 +850,7 @@ int s2n_conn_set_handshake_type(struct s2n_connection *conn)
}
if (conn->session_ticket_status == S2N_NEW_TICKET) {
- conn->handshake.handshake_type |= WITH_SESSION_TICKET;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_tls12_flag(conn, WITH_SESSION_TICKET));
}
}
@@ -708,92 +858,92 @@ int s2n_conn_set_handshake_type(struct s2n_connection *conn)
* Client sent in the ClientHello. */
if (conn->actual_protocol_version <= S2N_TLS12 && conn->mode == S2N_SERVER && s2n_allowed_to_cache_connection(conn)) {
int r = s2n_resume_from_cache(conn);
- if (r == S2N_SUCCESS || (r < 0 && S2N_ERROR_IS_BLOCKING(s2n_errno))) {
+ if (r == S2N_SUCCESS || (r < S2N_SUCCESS && S2N_ERROR_IS_BLOCKING(s2n_errno))) {
return r;
}
+ POSIX_GUARD_RESULT(s2n_validate_ems_status(conn));
}
skip_cache_lookup:
if (conn->mode == S2N_CLIENT && conn->client_session_resumed == 1) {
- return 0;
+ return S2N_SUCCESS;
}
/* If we're doing full handshake, generate a new session id. */
- GUARD(s2n_generate_new_client_session_id(conn));
+ POSIX_GUARD(s2n_generate_new_client_session_id(conn));
/* If we get this far, it's a full handshake */
- conn->handshake.handshake_type |= FULL_HANDSHAKE;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_flag(conn, FULL_HANDSHAKE));
bool is_ephemeral = false;
- GUARD_AS_POSIX(s2n_kex_is_ephemeral(conn->secure.cipher_suite->key_exchange_alg, &is_ephemeral));
+ POSIX_GUARD_RESULT(s2n_kex_is_ephemeral(conn->secure.cipher_suite->key_exchange_alg, &is_ephemeral));
if (is_ephemeral) {
- conn->handshake.handshake_type |= TLS12_PERFECT_FORWARD_SECRECY;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_tls12_flag(conn, TLS12_PERFECT_FORWARD_SECRECY));
}
if (s2n_server_can_send_ocsp(conn) || s2n_server_sent_ocsp(conn)) {
- conn->handshake.handshake_type |= OCSP_STATUS;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_tls12_flag(conn, OCSP_STATUS));
}
- return 0;
+ return S2N_SUCCESS;
}
int s2n_conn_set_handshake_no_client_cert(struct s2n_connection *conn)
{
s2n_cert_auth_type client_cert_auth_type;
- GUARD(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
+ POSIX_GUARD(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
S2N_ERROR_IF(client_cert_auth_type != S2N_CERT_AUTH_OPTIONAL, S2N_ERR_BAD_MESSAGE);
- conn->handshake.handshake_type |= NO_CLIENT_CERT;
-
- return 0;
-}
-
-int s2n_conn_set_handshake_read_block(struct s2n_connection *conn)
-{
- notnull_check(conn);
-
- conn->handshake.paused = 1;
-
- return 0;
-}
-
-int s2n_conn_clear_handshake_read_block(struct s2n_connection *conn)
-{
- notnull_check(conn);
-
- conn->handshake.paused = 0;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_flag(conn, NO_CLIENT_CERT));
- return 0;
+ return S2N_SUCCESS;
}
const char *s2n_connection_get_last_message_name(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
-
+ PTR_ENSURE_REF(conn);
+ PTR_GUARD_RESULT(s2n_handshake_validate(&(conn->handshake)));
return message_names[ACTIVE_MESSAGE(conn)];
}
const char *s2n_connection_get_handshake_type_name(struct s2n_connection *conn)
{
- notnull_check_ptr(conn);
+ PTR_ENSURE_REF(conn);
+ PTR_PRECONDITION(s2n_handshake_validate(&(conn->handshake)));
- int handshake_type = conn->handshake.handshake_type;
+ uint32_t handshake_type = conn->handshake.handshake_type;
if (handshake_type == INITIAL) {
return "INITIAL";
}
+ const char** handshake_type_names = tls13_handshake_type_names;
+ size_t handshake_type_names_len = s2n_array_len(tls13_handshake_type_names);
+ if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
+ handshake_type_names = tls12_handshake_type_names;
+ handshake_type_names_len = s2n_array_len(tls12_handshake_type_names);
+ }
+
if (handshake_type_str[handshake_type][0] != '\0') {
return handshake_type_str[handshake_type];
}
- /* Compute handshake_type_str[handshake_type] */
+    /* Compute handshake_type_str[handshake_type] by concatenating
+     * the name of each applicable handshake_type flag.
+ *
+ * Unit tests enforce that the elements of handshake_type_str are always
+ * long enough to contain the longest possible valid handshake_type, but
+ * for safety we still handle the case where we need to truncate.
+ */
char *p = handshake_type_str[handshake_type];
- char *end = p + sizeof(handshake_type_str[0]);
-
- for (int i = 0; i < s2n_array_len(handshake_type_names); ++i) {
+ size_t remaining = sizeof(handshake_type_str[0]);
+ for (size_t i = 0; i < handshake_type_names_len; i++) {
if (handshake_type & (1 << i)) {
- p = s2n_strcpy(p, end, handshake_type_names[i]);
+ size_t bytes_to_copy = MIN(remaining, strlen(handshake_type_names[i]));
+ PTR_CHECKED_MEMCPY(p, handshake_type_names[i], bytes_to_copy);
+ p[bytes_to_copy] = '\0';
+ p += bytes_to_copy;
+ remaining -= bytes_to_copy;
}
}
@@ -820,11 +970,11 @@ static int s2n_handshake_write_io(struct s2n_connection *conn)
*/
if (s2n_stuffer_is_wiped(&conn->handshake.io)) {
if (record_type == TLS_HANDSHAKE) {
- GUARD(s2n_handshake_write_header(&conn->handshake.io, ACTIVE_STATE(conn).message_type));
+ POSIX_GUARD(s2n_handshake_write_header(&conn->handshake.io, ACTIVE_STATE(conn).message_type));
}
- GUARD(ACTIVE_STATE(conn).handler[conn->mode] (conn));
+ POSIX_GUARD(ACTIVE_STATE(conn).handler[conn->mode] (conn));
if (record_type == TLS_HANDSHAKE) {
- GUARD(s2n_handshake_finish_header(&conn->handshake.io));
+ POSIX_GUARD(s2n_handshake_finish_header(&conn->handshake.io));
}
}
@@ -832,38 +982,39 @@ static int s2n_handshake_write_io(struct s2n_connection *conn)
struct s2n_blob out = {0};
while (s2n_stuffer_data_available(&conn->handshake.io) > 0) {
uint16_t max_payload_size = 0;
- GUARD_AS_POSIX(s2n_record_max_write_payload_size(conn, &max_payload_size));
+ POSIX_GUARD_RESULT(s2n_record_max_write_payload_size(conn, &max_payload_size));
out.size = MIN(s2n_stuffer_data_available(&conn->handshake.io), max_payload_size);
out.data = s2n_stuffer_raw_read(&conn->handshake.io, out.size);
- notnull_check(out.data);
+ POSIX_ENSURE_REF(out.data);
- if (conn->config->quic_enabled) {
- GUARD_AS_POSIX(s2n_quic_write_handshake_message(conn, &out));
+ if (s2n_connection_is_quic_enabled(conn)) {
+ POSIX_GUARD_RESULT(s2n_quic_write_handshake_message(conn, &out));
} else {
- GUARD(s2n_record_write(conn, record_type, &out));
+ POSIX_GUARD(s2n_record_write(conn, record_type, &out));
}
/* MD5 and SHA sum the handshake data too */
if (record_type == TLS_HANDSHAKE) {
- GUARD(s2n_conn_update_handshake_hashes(conn, &out));
+ POSIX_GUARD(s2n_conn_update_handshake_hashes(conn, &out));
}
/* Actually send the record. We could block here. Assume the caller will call flush before coming back. */
- GUARD(s2n_flush(conn, &blocked));
+ POSIX_GUARD(s2n_flush(conn, &blocked));
}
/* We're done sending the last record, reset everything */
- GUARD(s2n_stuffer_wipe(&conn->out));
- GUARD(s2n_stuffer_wipe(&conn->handshake.io));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->out));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->handshake.io));
/* Update the secrets, if necessary */
- GUARD(s2n_tls13_handle_secrets(conn));
+ POSIX_GUARD_RESULT(s2n_tls13_secrets_update(conn));
+ POSIX_GUARD_RESULT(s2n_tls13_key_schedule_update(conn));
/* Advance the state machine */
- GUARD(s2n_advance_message(conn));
+ POSIX_GUARD(s2n_advance_message(conn));
- return 0;
+ return S2N_SUCCESS;
}
/*
@@ -880,16 +1031,16 @@ static int s2n_read_full_handshake_message(struct s2n_connection *conn, uint8_t
* what we can and then continue to the next record read iteration.
*/
if (s2n_stuffer_data_available(&conn->in) < (TLS_HANDSHAKE_HEADER_LENGTH - current_handshake_data)) {
- GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, s2n_stuffer_data_available(&conn->in)));
+ POSIX_GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, s2n_stuffer_data_available(&conn->in)));
return 1;
}
/* Get the remainder of the header */
- GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, (TLS_HANDSHAKE_HEADER_LENGTH - current_handshake_data)));
+ POSIX_GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, (TLS_HANDSHAKE_HEADER_LENGTH - current_handshake_data)));
}
uint32_t handshake_message_length;
- GUARD(s2n_handshake_parse_header(conn, message_type, &handshake_message_length));
+ POSIX_GUARD(s2n_handshake_parse_header(conn, message_type, &handshake_message_length));
S2N_ERROR_IF(handshake_message_length > S2N_MAXIMUM_HANDSHAKE_MESSAGE_LENGTH, S2N_ERR_BAD_MESSAGE);
@@ -897,7 +1048,7 @@ static int s2n_read_full_handshake_message(struct s2n_connection *conn, uint8_t
bytes_to_take = MIN(bytes_to_take, s2n_stuffer_data_available(&conn->in));
/* If the record is handshake data, add it to the handshake buffer */
- GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, bytes_to_take));
+ POSIX_GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, bytes_to_take));
/* If we have the whole handshake message, then success */
if (s2n_stuffer_data_available(&conn->handshake.io) == handshake_message_length) {
@@ -905,7 +1056,7 @@ static int s2n_read_full_handshake_message(struct s2n_connection *conn, uint8_t
}
/* We don't have the whole message, so we'll need to go again */
- GUARD(s2n_stuffer_reread(&conn->handshake.io));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->handshake.io));
return 1;
}
@@ -915,18 +1066,18 @@ static int s2n_handshake_conn_update_hashes(struct s2n_connection *conn)
uint8_t message_type;
uint32_t handshake_message_length;
- GUARD(s2n_stuffer_reread(&conn->handshake.io));
- GUARD(s2n_handshake_parse_header(conn, &message_type, &handshake_message_length));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->handshake.io));
+ POSIX_GUARD(s2n_handshake_parse_header(conn, &message_type, &handshake_message_length));
struct s2n_blob handshake_record = {0};
handshake_record.data = conn->handshake.io.blob.data;
handshake_record.size = TLS_HANDSHAKE_HEADER_LENGTH + handshake_message_length;
- notnull_check(handshake_record.data);
+ POSIX_ENSURE_REF(handshake_record.data);
/* MD5 and SHA sum the handshake data too */
- GUARD(s2n_conn_update_handshake_hashes(conn, &handshake_record));
+ POSIX_GUARD(s2n_conn_update_handshake_hashes(conn, &handshake_record));
- return 0;
+ return S2N_SUCCESS;
}
static int s2n_handshake_handle_sslv2(struct s2n_connection *conn)
@@ -935,60 +1086,64 @@ static int s2n_handshake_handle_sslv2(struct s2n_connection *conn)
/* Add the message to our handshake hashes */
struct s2n_blob hashed = {.data = conn->header_in.blob.data + 2,.size = 3 };
- GUARD(s2n_conn_update_handshake_hashes(conn, &hashed));
+ POSIX_GUARD(s2n_conn_update_handshake_hashes(conn, &hashed));
hashed.data = conn->in.blob.data;
hashed.size = s2n_stuffer_data_available(&conn->in);
- GUARD(s2n_conn_update_handshake_hashes(conn, &hashed));
+ POSIX_GUARD(s2n_conn_update_handshake_hashes(conn, &hashed));
/* Handle an SSLv2 client hello */
- GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, s2n_stuffer_data_available(&conn->in)));
+ POSIX_GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, s2n_stuffer_data_available(&conn->in)));
/* Set the client hello version */
conn->client_hello_version = S2N_SSLv2;
/* Execute the state machine handler */
int r = ACTIVE_STATE(conn).handler[conn->mode](conn);
- GUARD(s2n_stuffer_wipe(&conn->handshake.io));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->handshake.io));
/* We're done with the record, wipe it */
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
- if (r < 0) {
- /* Don't invoke blinding on some of the common errors */
- switch (s2n_errno) {
- case S2N_ERR_CANCELLED:
- case S2N_ERR_CIPHER_NOT_SUPPORTED:
- case S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED:
- conn->closed = 1;
- break;
- case S2N_ERR_IO_BLOCKED:
- case S2N_ERR_ASYNC_BLOCKED:
- /* A blocking condition is retryable, so we should return without killing the connection. */
- S2N_ERROR_PRESERVE_ERRNO();
- break;
- default:
- GUARD(s2n_connection_kill(conn));
- }
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->in));
- return r;
- }
+ WITH_ERROR_BLINDING(conn, POSIX_GUARD(r));
conn->in_status = ENCRYPTED;
/* Advance the state machine */
- GUARD(s2n_advance_message(conn));
+ POSIX_GUARD(s2n_advance_message(conn));
- return 0;
+ return S2N_SUCCESS;
}
static int s2n_try_delete_session_cache(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
if (s2n_allowed_to_cache_connection(conn) > 0) {
conn->config->cache_delete(conn, conn->config->cache_delete_data, conn->session_id, conn->session_id_len);
}
- return 0;
+ return S2N_SUCCESS;
+}
+
+static S2N_RESULT s2n_wipe_record(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_GUARD_POSIX(s2n_stuffer_wipe(&conn->header_in));
+ RESULT_GUARD_POSIX(s2n_stuffer_wipe(&conn->in));
+ conn->in_status = ENCRYPTED;
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_finish_read(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+
+ RESULT_GUARD_POSIX(s2n_handshake_conn_update_hashes(conn));
+ RESULT_GUARD_POSIX(s2n_stuffer_wipe(&conn->handshake.io));
+ RESULT_GUARD(s2n_tls13_secrets_update(conn));
+ RESULT_GUARD(s2n_tls13_key_schedule_update(conn));
+ RESULT_GUARD_POSIX(s2n_advance_message(conn));
+ return S2N_RESULT_OK;
}
/* Reading is a little more complicated than writing as the TLS RFCs allow content
@@ -1006,61 +1161,84 @@ static int s2n_handshake_read_io(struct s2n_connection *conn)
/* Fill conn->in stuffer necessary for the handshake.
* If using TCP, read a record. If using QUIC, read a message. */
- if (conn->config->quic_enabled) {
+ if (s2n_connection_is_quic_enabled(conn)) {
record_type = TLS_HANDSHAKE;
- GUARD_AS_POSIX(s2n_quic_read_handshake_message(conn, &message_type));
+ POSIX_GUARD_RESULT(s2n_quic_read_handshake_message(conn, &message_type));
} else {
- GUARD(s2n_read_full_record(conn, &record_type, &isSSLv2));
+ int r = s2n_read_full_record(conn, &record_type, &isSSLv2);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# If the client attempts a 0-RTT handshake but the server
+ *# rejects it, the server will generally not have the 0-RTT record
+ *# protection keys and must instead use trial decryption (either with
+ *# the 1-RTT handshake keys or by looking for a cleartext ClientHello in
+ *# the case of a HelloRetryRequest) to find the first non-0-RTT message.
+ *#
+ *# If the server chooses to accept the "early_data" extension, then it
+ *# MUST comply with the same error-handling requirements specified for
+ *# all records when processing early data records. Specifically, if the
+ *# server fails to decrypt a 0-RTT record following an accepted
+ *# "early_data" extension, it MUST terminate the connection with a
+ *# "bad_record_mac" alert as per Section 5.2.
+ */
+ if ((r < S2N_SUCCESS) && (s2n_errno == S2N_ERR_EARLY_DATA_TRIAL_DECRYPT)) {
+ POSIX_GUARD(s2n_stuffer_reread(&conn->in));
+ POSIX_GUARD_RESULT(s2n_early_data_record_bytes(conn, s2n_stuffer_data_available(&conn->in)));
+ POSIX_GUARD_RESULT(s2n_wipe_record(conn));
+ return S2N_SUCCESS;
+ }
+ POSIX_GUARD(r);
}
if (isSSLv2) {
S2N_ERROR_IF(record_type != SSLv2_CLIENT_HELLO, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_handshake_handle_sslv2(conn));
+ POSIX_GUARD(s2n_handshake_handle_sslv2(conn));
}
/* Now we have a record, but it could be a partial fragment of a message, or it might
* contain several messages.
*/
- S2N_ERROR_IF(record_type == TLS_APPLICATION_DATA, S2N_ERR_BAD_MESSAGE);
- if (record_type == TLS_CHANGE_CIPHER_SPEC) {
+
+ if (record_type == TLS_APPLICATION_DATA) {
+ POSIX_ENSURE(conn->early_data_expected, S2N_ERR_BAD_MESSAGE);
+ POSIX_GUARD_RESULT(s2n_early_data_validate_recv(conn));
+ POSIX_BAIL(S2N_ERR_EARLY_DATA_BLOCKED);
+ } else if (record_type == TLS_CHANGE_CIPHER_SPEC) {
/* TLS1.3 can receive unexpected CCS messages at any point in the handshake
* due to a peer operating in middlebox compatibility mode.
* However, when operating in QUIC mode, S2N should not accept ANY CCS messages,
* including these unexpected ones.*/
- if (!IS_TLS13_HANDSHAKE(conn) || conn->config->quic_enabled) {
- ENSURE_POSIX(EXPECTED_RECORD_TYPE(conn) == TLS_CHANGE_CIPHER_SPEC, S2N_ERR_BAD_MESSAGE);
- ENSURE_POSIX(!CONNECTION_IS_WRITER(conn), S2N_ERR_BAD_MESSAGE);
+ if (!IS_TLS13_HANDSHAKE(conn) || s2n_connection_is_quic_enabled(conn)) {
+ POSIX_ENSURE(EXPECTED_RECORD_TYPE(conn) == TLS_CHANGE_CIPHER_SPEC, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(!CONNECTION_IS_WRITER(conn), S2N_ERR_BAD_MESSAGE);
}
S2N_ERROR_IF(s2n_stuffer_data_available(&conn->in) != 1, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, s2n_stuffer_data_available(&conn->in)));
- GUARD(CCS_STATE(conn).handler[conn->mode] (conn));
- GUARD(s2n_stuffer_wipe(&conn->handshake.io));
+ POSIX_GUARD(s2n_stuffer_copy(&conn->in, &conn->handshake.io, s2n_stuffer_data_available(&conn->in)));
+ POSIX_GUARD(CCS_STATE(conn).handler[conn->mode] (conn));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->handshake.io));
/* We're done with the record, wipe it */
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
- conn->in_status = ENCRYPTED;
+ POSIX_GUARD_RESULT(s2n_wipe_record(conn));
/* Advance the state machine if this was an expected message */
if (EXPECTED_RECORD_TYPE(conn) == TLS_CHANGE_CIPHER_SPEC && !CONNECTION_IS_WRITER(conn)) {
- GUARD(s2n_advance_message(conn));
+ POSIX_GUARD(s2n_advance_message(conn));
}
- return 0;
+ return S2N_SUCCESS;
} else if (record_type != TLS_HANDSHAKE) {
if (record_type == TLS_ALERT) {
- GUARD(s2n_process_alert_fragment(conn));
+ POSIX_GUARD(s2n_process_alert_fragment(conn));
}
/* Ignore record types that we don't support */
/* We're done with the record, wipe it */
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
- conn->in_status = ENCRYPTED;
- return 0;
+ POSIX_GUARD_RESULT(s2n_wipe_record(conn));
+ return S2N_SUCCESS;
}
/* Record is a handshake message */
@@ -1070,29 +1248,27 @@ static int s2n_handshake_read_io(struct s2n_connection *conn)
/* We're done with negotiating but we have trailing data in this record. Bail on the handshake. */
S2N_ERROR_IF(EXPECTED_RECORD_TYPE(conn) == TLS_APPLICATION_DATA, S2N_ERR_BAD_MESSAGE);
int r;
- GUARD((r = s2n_read_full_handshake_message(conn, &message_type)));
+ POSIX_GUARD((r = s2n_read_full_handshake_message(conn, &message_type)));
/* Do we need more data? This happens for message fragmentation */
if (r == 1) {
/* Break out of this inner loop, but since we're not changing the state, the
* outer loop in s2n_handshake_io() will read another record.
*/
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
- conn->in_status = ENCRYPTED;
- return 0;
+ POSIX_GUARD_RESULT(s2n_wipe_record(conn));
+ return S2N_SUCCESS;
}
s2n_cert_auth_type client_cert_auth_type;
- GUARD(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
+ POSIX_GUARD(s2n_connection_get_client_auth_type(conn, &client_cert_auth_type));
/* If we're a Client, and received a ClientCertRequest message, and ClientAuth
* is set to optional, then switch the State Machine that we're using to expect the ClientCertRequest. */
if (conn->mode == S2N_CLIENT
&& client_cert_auth_type == S2N_CERT_AUTH_OPTIONAL
&& message_type == TLS_CERT_REQ) {
- ENSURE_POSIX(conn->handshake.handshake_type & FULL_HANDSHAKE, S2N_ERR_HANDSHAKE_STATE);
- conn->handshake.handshake_type |= CLIENT_AUTH;
+ POSIX_ENSURE(IS_FULL_HANDSHAKE(conn), S2N_ERR_HANDSHAKE_STATE);
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_flag(conn, CLIENT_AUTH));
}
/* According to rfc6066 section 8, server may choose not to send "CertificateStatus" message even if it has
@@ -1100,55 +1276,35 @@ static int s2n_handshake_read_io(struct s2n_connection *conn)
if (conn->mode == S2N_CLIENT
&& EXPECTED_MESSAGE_TYPE(conn) == TLS_SERVER_CERT_STATUS
&& message_type != TLS_SERVER_CERT_STATUS) {
- conn->handshake.handshake_type &= ~OCSP_STATUS;
+ POSIX_GUARD_RESULT(s2n_handshake_type_unset_tls12_flag(conn, OCSP_STATUS));
}
- ENSURE_POSIX(record_type == EXPECTED_RECORD_TYPE(conn), S2N_ERR_BAD_MESSAGE);
- ENSURE_POSIX(message_type == EXPECTED_MESSAGE_TYPE(conn), S2N_ERR_BAD_MESSAGE);
- ENSURE_POSIX(!CONNECTION_IS_WRITER(conn), S2N_ERR_BAD_MESSAGE);
-
- /* Call the relevant handler */
- r = ACTIVE_STATE(conn).handler[conn->mode] (conn);
-
- /* Don't update handshake hashes until after the handler has executed since some handlers need to read the
- * hash values before they are updated. */
- GUARD(s2n_handshake_conn_update_hashes(conn));
-
- GUARD(s2n_stuffer_wipe(&conn->handshake.io));
-
- if (r < 0) {
- /* Don't invoke blinding on some of the common errors */
- switch (s2n_errno) {
- case S2N_ERR_CANCELLED:
- case S2N_ERR_CIPHER_NOT_SUPPORTED:
- case S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED:
- conn->closed = 1;
- break;
- case S2N_ERR_IO_BLOCKED:
- case S2N_ERR_ASYNC_BLOCKED:
- /* A blocking condition is retryable, so we should return without killing the connection. */
- S2N_ERROR_PRESERVE_ERRNO();
- break;
- default:
- GUARD(s2n_connection_kill(conn));
- }
-
- return r;
+ /*
+ *= https://tools.ietf.org/rfc/rfc5246#section-7.4
+ *# The one message that is not bound by these ordering rules
+ *# is the HelloRequest message, which can be sent at any time, but which
+ *# SHOULD be ignored by the client if it arrives in the middle of a handshake.
+ */
+ if (message_type == TLS_HELLO_REQUEST) {
+ POSIX_GUARD(s2n_client_hello_request_recv(conn));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->handshake.io));
+ continue;
}
- /* Update the secrets, if necessary */
- GUARD(s2n_tls13_handle_secrets(conn));
+ POSIX_ENSURE(record_type == EXPECTED_RECORD_TYPE(conn), S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(message_type == EXPECTED_MESSAGE_TYPE(conn), S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(!CONNECTION_IS_WRITER(conn), S2N_ERR_BAD_MESSAGE);
+
+ /* Call the relevant handler */
+ WITH_ERROR_BLINDING(conn, POSIX_GUARD(ACTIVE_STATE(conn).handler[conn->mode] (conn)));
/* Advance the state machine */
- GUARD(s2n_advance_message(conn));
+ POSIX_GUARD_RESULT(s2n_finish_read(conn));
}
/* We're done with the record, wipe it */
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
- conn->in_status = ENCRYPTED;
-
- return 0;
+ POSIX_GUARD_RESULT(s2n_wipe_record(conn));
+ return S2N_SUCCESS;
}
static int s2n_handle_retry_state(struct s2n_connection *conn)
@@ -1160,66 +1316,66 @@ static int s2n_handle_retry_state(struct s2n_connection *conn)
s2n_errno = S2N_ERR_OK;
const int r = ACTIVE_STATE(conn).handler[conn->mode] (conn);
- if (r < 0 && S2N_ERROR_IS_BLOCKING(s2n_errno)) {
+ if (r < S2N_SUCCESS && S2N_ERROR_IS_BLOCKING(s2n_errno)) {
/* If the handler is still waiting for data, return control to the caller. */
S2N_ERROR_PRESERVE_ERRNO();
}
+ /* Resume the handshake */
+ conn->handshake.paused = false;
+
if (!CONNECTION_IS_WRITER(conn)) {
/* We're done parsing the record, reset everything */
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->in));
conn->in_status = ENCRYPTED;
}
- if (r < 0) {
- /* There is some other problem and we should kill the connection. */
- if (conn->session_id_len) {
- s2n_try_delete_session_cache(conn);
- }
-
- GUARD(s2n_connection_kill(conn));
- S2N_ERROR_PRESERVE_ERRNO();
- }
-
if (CONNECTION_IS_WRITER(conn)) {
+ POSIX_GUARD(r);
+
/* If we're the writer and handler just finished, update the record header if
* needed and let the s2n_handshake_write_io write the data to the socket */
if (EXPECTED_RECORD_TYPE(conn) == TLS_HANDSHAKE) {
- GUARD(s2n_handshake_finish_header(&conn->handshake.io));
+ POSIX_GUARD(s2n_handshake_finish_header(&conn->handshake.io));
}
} else {
+ if (r < S2N_SUCCESS && conn->session_id_len) {
+ s2n_try_delete_session_cache(conn);
+ }
+ WITH_ERROR_BLINDING(conn, POSIX_GUARD(r));
+
/* The read handler processed the record successfully, we are done with this
* record. Advance the state machine. */
- GUARD(s2n_advance_message(conn));
+ POSIX_GUARD_RESULT(s2n_finish_read(conn));
}
- return 0;
+ return S2N_SUCCESS;
}
-int s2n_negotiate(struct s2n_connection *conn, s2n_blocked_status *blocked)
+int s2n_negotiate_impl(struct s2n_connection *conn, s2n_blocked_status *blocked)
{
- notnull_check(conn);
- notnull_check(blocked);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(blocked);
- while (ACTIVE_STATE(conn).writer != 'B') {
+ while (ACTIVE_STATE(conn).writer != 'B' && ACTIVE_MESSAGE(conn) != conn->handshake.end_of_messages) {
errno = 0;
s2n_errno = S2N_ERR_OK;
/* Flush any pending I/O or alert messages */
- GUARD(s2n_flush(conn, blocked));
+ POSIX_GUARD(s2n_flush(conn, blocked));
/* If the handshake was paused, retry the current message */
if (conn->handshake.paused) {
*blocked = S2N_BLOCKED_ON_APPLICATION_INPUT;
- GUARD(s2n_handle_retry_state(conn));
+ POSIX_GUARD(s2n_handle_retry_state(conn));
}
if (CONNECTION_IS_WRITER(conn)) {
*blocked = S2N_BLOCKED_ON_WRITE;
const int write_result = s2n_handshake_write_io(conn);
- if (write_result < 0) {
+ if (write_result < S2N_SUCCESS) {
if (!S2N_ERROR_IS_BLOCKING(s2n_errno)) {
/* Non-retryable write error. The peer might have sent an alert. Try and read it. */
const int write_errno = errno;
@@ -1240,6 +1396,9 @@ int s2n_negotiate(struct s2n_connection *conn, s2n_blocked_status *blocked)
if (s2n_errno == S2N_ERR_ASYNC_BLOCKED) {
*blocked = S2N_BLOCKED_ON_APPLICATION_INPUT;
+ conn->handshake.paused = true;
+ } else if (s2n_errno == S2N_ERR_EARLY_DATA_BLOCKED) {
+ *blocked = S2N_BLOCKED_ON_EARLY_DATA;
}
S2N_ERROR_PRESERVE_ERRNO();
@@ -1248,7 +1407,7 @@ int s2n_negotiate(struct s2n_connection *conn, s2n_blocked_status *blocked)
*blocked = S2N_BLOCKED_ON_READ;
const int read_result = s2n_handshake_read_io(conn);
- if (read_result < 0) {
+ if (read_result < S2N_SUCCESS) {
/* One blocking condition is waiting on the session resumption cache. */
/* So we don't want to delete anything if we are blocked. */
if (!S2N_ERROR_IS_BLOCKING(s2n_errno) && conn->session_id_len) {
@@ -1257,19 +1416,38 @@ int s2n_negotiate(struct s2n_connection *conn, s2n_blocked_status *blocked)
if (s2n_errno == S2N_ERR_ASYNC_BLOCKED) {
*blocked = S2N_BLOCKED_ON_APPLICATION_INPUT;
+ conn->handshake.paused = true;
+ } else if (s2n_errno == S2N_ERR_EARLY_DATA_BLOCKED) {
+ *blocked = S2N_BLOCKED_ON_EARLY_DATA;
}
S2N_ERROR_PRESERVE_ERRNO();
}
}
- /* If the handshake has just ended, free up memory */
if (ACTIVE_STATE(conn).writer == 'B') {
- GUARD(s2n_stuffer_resize(&conn->handshake.io, 0));
+ /* Clean up handshake secrets */
+ POSIX_GUARD_RESULT(s2n_tls13_secrets_clean(conn));
+
+ /* Send any pending post-handshake messages */
+ POSIX_GUARD(s2n_post_handshake_send(conn, blocked));
+
+ /* If the handshake has just ended, free up memory */
+ POSIX_GUARD(s2n_stuffer_resize(&conn->handshake.io, 0));
}
}
*blocked = S2N_NOT_BLOCKED;
- return 0;
+ return S2N_SUCCESS;
+}
+
+int s2n_negotiate(struct s2n_connection *conn, s2n_blocked_status *blocked)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE(!conn->negotiate_in_use, S2N_ERR_REENTRANCY);
+ conn->negotiate_in_use = true;
+ int result = s2n_negotiate_impl(conn, blocked);
+ conn->negotiate_in_use = false;
+ return result;
}
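Editor's note: two caller-visible behaviors change here: s2n_negotiate() now rejects reentrant calls (S2N_ERR_REENTRANCY) and can report the new S2N_BLOCKED_ON_EARLY_DATA status. The following is a hedged sketch of an application-side negotiation loop under those rules; wait_for_io() is a hypothetical helper standing in for the application's poll()/select() logic, not an s2n API.

/* Sketch of an application driving s2n_negotiate() with the new blocked states.
 * wait_for_io() is a placeholder; error handling is simplified. */
static int app_negotiate(struct s2n_connection *conn)
{
    s2n_blocked_status blocked = S2N_NOT_BLOCKED;
    while (s2n_negotiate(conn, &blocked) != S2N_SUCCESS) {
        if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) {
            return -1; /* fatal: the handshake failed */
        }
        if (blocked == S2N_BLOCKED_ON_READ || blocked == S2N_BLOCKED_ON_WRITE) {
            wait_for_io(conn, blocked); /* wait for the socket to become ready */
        }
        /* S2N_BLOCKED_ON_APPLICATION_INPUT: an async callback (e.g. pkey) is pending.
         * S2N_BLOCKED_ON_EARLY_DATA: the application must first handle early data
         * via the s2n early data APIs before continuing the handshake. */
    }
    return 0;
}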
diff --git a/contrib/restricted/aws/s2n/tls/s2n_handshake_transcript.c b/contrib/restricted/aws/s2n/tls/s2n_handshake_transcript.c
index 7c6c3dffb6..d0eb448f41 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_handshake_transcript.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_handshake_transcript.c
@@ -24,21 +24,12 @@
/* Length of the synthetic message header */
#define MESSAGE_HASH_HEADER_LENGTH 4
-static int s2n_tls13_conn_copy_server_finished_hash(struct s2n_connection *conn) {
- notnull_check(conn);
- s2n_tls13_connection_keys(keys, conn);
- struct s2n_hash_state hash_state = {0};
-
- GUARD(s2n_handshake_get_hash_state(conn, keys.hash_algorithm, &hash_state));
- GUARD(s2n_hash_copy(&conn->handshake.server_finished_copy, &hash_state));
-
- return 0;
-}
-
int s2n_conn_update_handshake_hashes(struct s2n_connection *conn, struct s2n_blob *data)
{
- notnull_check(conn);
- notnull_check(data);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(data);
+ struct s2n_handshake_hashes *hashes = conn->handshake.hashes;
+ POSIX_ENSURE_REF(hashes);
if (s2n_handshake_is_hash_required(&conn->handshake, S2N_HASH_MD5)) {
/* The handshake MD5 hash state will fail the s2n_hash_is_available() check
@@ -47,11 +38,11 @@ int s2n_conn_update_handshake_hashes(struct s2n_connection *conn, struct s2n_blo
* PRF, which is required to comply with the TLS 1.0 and 1.1 RFCs and is approved
* as per NIST Special Publication 800-52 Revision 1.
*/
- GUARD(s2n_hash_update(&conn->handshake.md5, data->data, data->size));
+ POSIX_GUARD(s2n_hash_update(&hashes->md5, data->data, data->size));
}
if (s2n_handshake_is_hash_required(&conn->handshake, S2N_HASH_SHA1)) {
- GUARD(s2n_hash_update(&conn->handshake.sha1, data->data, data->size));
+ POSIX_GUARD(s2n_hash_update(&hashes->sha1, data->data, data->size));
}
const uint8_t md5_sha1_required = (s2n_handshake_is_hash_required(&conn->handshake, S2N_HASH_MD5) &&
@@ -63,33 +54,26 @@ int s2n_conn_update_handshake_hashes(struct s2n_connection *conn, struct s2n_blo
* CertificateVerify message and the PRF. NIST SP 800-52r1 approves use
* of MD5_SHA1 for these use cases (see footnotes 15 and 20, and section
* 3.3.2) */
- GUARD(s2n_hash_update(&conn->handshake.md5_sha1, data->data, data->size));
+ POSIX_GUARD(s2n_hash_update(&hashes->md5_sha1, data->data, data->size));
}
if (s2n_handshake_is_hash_required(&conn->handshake, S2N_HASH_SHA224)) {
- GUARD(s2n_hash_update(&conn->handshake.sha224, data->data, data->size));
+ POSIX_GUARD(s2n_hash_update(&hashes->sha224, data->data, data->size));
}
if (s2n_handshake_is_hash_required(&conn->handshake, S2N_HASH_SHA256)) {
- GUARD(s2n_hash_update(&conn->handshake.sha256, data->data, data->size));
+ POSIX_GUARD(s2n_hash_update(&hashes->sha256, data->data, data->size));
}
if (s2n_handshake_is_hash_required(&conn->handshake, S2N_HASH_SHA384)) {
- GUARD(s2n_hash_update(&conn->handshake.sha384, data->data, data->size));
+ POSIX_GUARD(s2n_hash_update(&hashes->sha384, data->data, data->size));
}
if (s2n_handshake_is_hash_required(&conn->handshake, S2N_HASH_SHA512)) {
- GUARD(s2n_hash_update(&conn->handshake.sha512, data->data, data->size));
+ POSIX_GUARD(s2n_hash_update(&hashes->sha512, data->data, data->size));
}
- /* Copy the CLIENT_HELLO -> SERVER_FINISHED hash.
- * TLS1.3 will need it later to calculate the application secrets. */
- if (s2n_connection_get_protocol_version(conn) >= S2N_TLS13 &&
- s2n_conn_get_current_message_type(conn) == SERVER_FINISHED) {
- GUARD(s2n_tls13_conn_copy_server_finished_hash(conn));
- }
-
- return 0;
+ return S2N_SUCCESS;
}
/* When a HelloRetryRequest message is used, the hash transcript needs to be recreated.
@@ -99,7 +83,9 @@ int s2n_conn_update_handshake_hashes(struct s2n_connection *conn, struct s2n_blo
*/
int s2n_server_hello_retry_recreate_transcript(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
+ struct s2n_handshake_hashes *hashes = conn->handshake.hashes;
+ POSIX_ENSURE_REF(hashes);
s2n_tls13_connection_keys(keys, conn);
uint8_t hash_digest_length = keys.size;
@@ -110,27 +96,22 @@ int s2n_server_hello_retry_recreate_transcript(struct s2n_connection *conn)
msghdr[MESSAGE_HASH_HEADER_LENGTH - 1] = hash_digest_length;
/* Grab the current transcript hash to use as the ClientHello1 value. */
- struct s2n_hash_state hash_state, client_hello1_hash;
- uint8_t client_hello1_digest_out[S2N_MAX_DIGEST_LEN];
- GUARD(s2n_handshake_get_hash_state(conn, keys.hash_algorithm, &hash_state));
-
- GUARD(s2n_hash_new(&client_hello1_hash));
- GUARD(s2n_hash_copy(&client_hello1_hash, &hash_state));
- GUARD(s2n_hash_digest(&client_hello1_hash, client_hello1_digest_out, hash_digest_length));
- GUARD(s2n_hash_free(&client_hello1_hash));
+ struct s2n_hash_state *client_hello1_hash = &hashes->hash_workspace;
+ uint8_t client_hello1_digest_out[S2N_MAX_DIGEST_LEN] = { 0 };
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, keys.hash_algorithm, client_hello1_hash));
+ POSIX_GUARD(s2n_hash_digest(client_hello1_hash, client_hello1_digest_out, hash_digest_length));
/* Step 1: Reset the hash state */
- GUARD(s2n_handshake_reset_hash_state(conn, keys.hash_algorithm));
+ POSIX_GUARD_RESULT(s2n_handshake_reset_hash_state(conn, keys.hash_algorithm));
/* Step 2: Update the transcript with the synthetic message */
struct s2n_blob msg_blob = {0};
- GUARD(s2n_blob_init(&msg_blob, msghdr, MESSAGE_HASH_HEADER_LENGTH));
- GUARD(s2n_conn_update_handshake_hashes(conn, &msg_blob));
+ POSIX_GUARD(s2n_blob_init(&msg_blob, msghdr, MESSAGE_HASH_HEADER_LENGTH));
+ POSIX_GUARD(s2n_conn_update_handshake_hashes(conn, &msg_blob));
/* Step 3: Update the transcript with the ClientHello1 hash */
- GUARD(s2n_blob_init(&msg_blob, client_hello1_digest_out, hash_digest_length));
- GUARD(s2n_conn_update_handshake_hashes(conn, &msg_blob));
+ POSIX_GUARD(s2n_blob_init(&msg_blob, client_hello1_digest_out, hash_digest_length));
+ POSIX_GUARD(s2n_conn_update_handshake_hashes(conn, &msg_blob));
- return 0;
+ return S2N_SUCCESS;
}
-
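
The transcript-recreation logic above follows the HelloRetryRequest rule from RFC 8446 section 4.4.1: the hash of ClientHello1 is wrapped in a synthetic "message_hash" handshake message before hashing resumes. A hedged sketch of the 4-byte header that msghdr carries (the handshake type 254 and the zeroed high length bytes are fixed by the RFC; only the final byte varies with the negotiated digest):

    /* message_hash(254) || 24-bit length == digest size */
    uint8_t msghdr[MESSAGE_HASH_HEADER_LENGTH] = { 254, 0, 0, hash_digest_length };
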
diff --git a/contrib/restricted/aws/s2n/tls/s2n_handshake_type.c b/contrib/restricted/aws/s2n/tls/s2n_handshake_type.c
new file mode 100644
index 0000000000..1ba27a3684
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_handshake_type.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "tls/s2n_connection.h"
+#include "tls/s2n_handshake_type.h"
+#include "utils/s2n_safety.h"
+
+S2N_RESULT s2n_handshake_type_set_flag(struct s2n_connection *conn, s2n_handshake_type_flag flag)
+{
+ RESULT_ENSURE_REF(conn);
+ conn->handshake.handshake_type |= flag;
+ return S2N_RESULT_OK;
+}
+
+bool s2n_handshake_type_check_flag(struct s2n_connection *conn, s2n_handshake_type_flag flag)
+{
+ return conn && (conn->handshake.handshake_type & flag);
+}
+
+S2N_RESULT s2n_handshake_type_set_tls12_flag(struct s2n_connection *conn, s2n_tls12_handshake_type_flag flag)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE(s2n_connection_get_protocol_version(conn) < S2N_TLS13, S2N_ERR_HANDSHAKE_STATE);
+ conn->handshake.handshake_type |= flag;
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_handshake_type_unset_tls12_flag(struct s2n_connection *conn, s2n_tls12_handshake_type_flag flag)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE(s2n_connection_get_protocol_version(conn) < S2N_TLS13, S2N_ERR_HANDSHAKE_STATE);
+ conn->handshake.handshake_type &= ~(flag);
+ return S2N_RESULT_OK;
+}
+
+bool s2n_handshake_type_check_tls12_flag(struct s2n_connection *conn, s2n_tls12_handshake_type_flag flag)
+{
+ return conn && s2n_connection_get_protocol_version(conn) < S2N_TLS13
+ && (conn->handshake.handshake_type & flag);
+}
+
+S2N_RESULT s2n_handshake_type_set_tls13_flag(struct s2n_connection *conn, s2n_tls13_handshake_type_flag flag)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE(s2n_connection_get_protocol_version(conn) >= S2N_TLS13, S2N_ERR_HANDSHAKE_STATE);
+ conn->handshake.handshake_type |= flag;
+ return S2N_RESULT_OK;
+}
+
+bool s2n_handshake_type_check_tls13_flag(struct s2n_connection *conn, s2n_tls13_handshake_type_flag flag)
+{
+ return s2n_connection_get_protocol_version(conn) >= S2N_TLS13
+ && (conn->handshake.handshake_type & flag);
+}
+
+S2N_RESULT s2n_handshake_type_reset(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ conn->handshake.handshake_type = 0;
+ return S2N_RESULT_OK;
+}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_handshake_type.h b/contrib/restricted/aws/s2n/tls/s2n_handshake_type.h
new file mode 100644
index 0000000000..2163457eb9
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_handshake_type.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "utils/s2n_result.h"
+
+/* Maximum number of valid handshakes */
+#define S2N_HANDSHAKES_COUNT 256
+
+#define IS_NEGOTIATED(conn) \
+ ( s2n_handshake_type_check_flag(conn, NEGOTIATED) )
+
+#define IS_FULL_HANDSHAKE(conn) \
+ ( s2n_handshake_type_check_flag(conn, FULL_HANDSHAKE) )
+
+#define IS_RESUMPTION_HANDSHAKE(conn) \
+ ( !IS_FULL_HANDSHAKE(conn) && IS_NEGOTIATED(conn) )
+
+#define IS_CLIENT_AUTH_HANDSHAKE(conn) \
+ ( s2n_handshake_type_check_flag(conn, CLIENT_AUTH) )
+
+#define IS_CLIENT_AUTH_NO_CERT(conn) \
+ ( IS_CLIENT_AUTH_HANDSHAKE(conn) && s2n_handshake_type_check_flag(conn, NO_CLIENT_CERT) )
+
+#define IS_TLS12_PERFECT_FORWARD_SECRECY_HANDSHAKE(conn) \
+ ( s2n_handshake_type_check_tls12_flag(conn, TLS12_PERFECT_FORWARD_SECRECY) )
+
+#define IS_OCSP_STAPLED(conn) \
+ ( s2n_handshake_type_check_tls12_flag(conn, OCSP_STATUS) )
+
+#define IS_ISSUING_NEW_SESSION_TICKET(conn) \
+ ( s2n_handshake_type_check_tls12_flag(conn, WITH_SESSION_TICKET) )
+
+#define IS_HELLO_RETRY_HANDSHAKE(conn) \
+ ( s2n_handshake_type_check_tls13_flag(conn, HELLO_RETRY_REQUEST) )
+
+#define IS_MIDDLEBOX_COMPAT_MODE(conn) \
+ ( s2n_handshake_type_check_tls13_flag(conn, MIDDLEBOX_COMPAT) )
+
+#define WITH_EARLY_DATA(conn) \
+ ( s2n_handshake_type_check_tls13_flag(conn, WITH_EARLY_DATA) )
+
+#define WITH_EARLY_CLIENT_CCS(conn) \
+ ( s2n_handshake_type_check_tls13_flag(conn, EARLY_CLIENT_CCS) )
+
+typedef enum {
+ INITIAL = 0,
+ NEGOTIATED = 1,
+ FULL_HANDSHAKE = 2,
+ CLIENT_AUTH = 4,
+ NO_CLIENT_CERT = 8,
+} s2n_handshake_type_flag;
+
+S2N_RESULT s2n_handshake_type_set_flag(struct s2n_connection *conn, s2n_handshake_type_flag flag);
+bool s2n_handshake_type_check_flag(struct s2n_connection *conn, s2n_handshake_type_flag flag);
+
+typedef enum {
+ TLS12_PERFECT_FORWARD_SECRECY = 16,
+ OCSP_STATUS = 32,
+ WITH_SESSION_TICKET = 64,
+} s2n_tls12_handshake_type_flag;
+
+S2N_RESULT s2n_handshake_type_set_tls12_flag(struct s2n_connection *conn, s2n_tls12_handshake_type_flag flag);
+S2N_RESULT s2n_handshake_type_unset_tls12_flag(struct s2n_connection *conn, s2n_tls12_handshake_type_flag flag);
+bool s2n_handshake_type_check_tls12_flag(struct s2n_connection *conn, s2n_tls12_handshake_type_flag flag);
+
+typedef enum {
+ HELLO_RETRY_REQUEST = 16,
+ MIDDLEBOX_COMPAT = 32,
+ WITH_EARLY_DATA = 64,
+ EARLY_CLIENT_CCS = 128,
+} s2n_tls13_handshake_type_flag;
+
+S2N_RESULT s2n_handshake_type_set_tls13_flag(struct s2n_connection *conn, s2n_tls13_handshake_type_flag flag);
+bool s2n_handshake_type_check_tls13_flag(struct s2n_connection *conn, s2n_tls13_handshake_type_flag flag);
+
+S2N_RESULT s2n_handshake_type_reset(struct s2n_connection *conn);
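
Note that the TLS 1.2 and TLS 1.3 flag enums above deliberately reuse the same bit positions (16, 32, 64), which is why the setters and checkers in s2n_handshake_type.c verify the negotiated protocol version before touching handshake_type. A hedged usage sketch, assuming conn has already negotiated its protocol version:

    /* TLS 1.2 connection: mark the handshake as issuing a session ticket. */
    RESULT_GUARD(s2n_handshake_type_set_tls12_flag(conn, WITH_SESSION_TICKET));
    /* Reading the same bit through the TLS 1.3 accessor stays false, because
     * the version check in s2n_handshake_type_check_tls13_flag() fails first. */
    bool early_data = s2n_handshake_type_check_tls13_flag(conn, WITH_EARLY_DATA);
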
diff --git a/contrib/restricted/aws/s2n/tls/s2n_internal.h b/contrib/restricted/aws/s2n/tls/s2n_internal.h
new file mode 100644
index 0000000000..5728188f30
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_internal.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#if ((__GNUC__ >= 4) || defined(__clang__)) && defined(S2N_EXPORTS)
+# define S2N_PRIVATE_API __attribute__((visibility("default")))
+#else
+# define S2N_PRIVATE_API
+#endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+
+#include <stdint.h>
+
+/*
+ * Internal APIs.
+ *
+ * These APIs change the behavior of S2N in potentially dangerous ways and should only be
+ * used for testing purposes. All Internal APIs are subject to change without notice.
+ */
+
+struct s2n_config;
+struct s2n_connection;
+
+/*
+ * Gets the config set on the connection.
+ *
+ * This function will return a pointer to the config set by `s2n_connection_set_config`.
+ * It will return NULL prior to `s2n_connection_set_config` being called and a config
+ * being set by the application.
+ *
+ * Caution: A config can be associated with multiple connections and should not be
+ * modified after it has been built. Doing so is undefined behavior.
+ */
+S2N_PRIVATE_API
+extern int s2n_connection_get_config(struct s2n_connection *conn, struct s2n_config **config);
+
+/*
+ * Enable polling the async client_hello callback to make progress.
+ *
+ * `s2n_negotiate` must be called multiple times to poll the callback function
+ * and make progress.
+ */
+S2N_PRIVATE_API
+extern int s2n_config_client_hello_cb_enable_poll(struct s2n_config *config);
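
A hedged sketch of the polling mode described above; s2n_config_set_client_hello_cb() is the existing public API, while my_client_hello_cb and my_ctx stand in for hypothetical application code:

    if (s2n_config_client_hello_cb_enable_poll(config) != S2N_SUCCESS
            || s2n_config_set_client_hello_cb(config, my_client_hello_cb, my_ctx) != S2N_SUCCESS) {
        /* handle configuration failure */
    }
    /* With polling enabled, a callback that has not completed causes
     * s2n_negotiate() to return blocked on application input; the server
     * simply calls it again later instead of treating this as an error. */
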
diff --git a/contrib/restricted/aws/s2n/tls/s2n_kem.c b/contrib/restricted/aws/s2n/tls/s2n_kem.c
index aef1665918..8566502ccb 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_kem.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_kem.c
@@ -46,6 +46,18 @@ const struct s2n_kem s2n_bike1_l1_r2 = {
.decapsulate = &BIKE1_L1_R2_crypto_kem_dec,
};
+const struct s2n_kem s2n_bike_l1_r3 = {
+ .name = "BIKEr3-Level1",
+ .kem_extension_id = TLS_PQ_KEM_EXTENSION_ID_BIKE1_L1_R3,
+ .public_key_length = BIKE_L1_R3_PUBLIC_KEY_BYTES,
+ .private_key_length = BIKE_L1_R3_SECRET_KEY_BYTES,
+ .shared_secret_key_length = BIKE_L1_R3_SHARED_SECRET_BYTES,
+ .ciphertext_length = BIKE_L1_R3_CIPHERTEXT_BYTES,
+ .generate_keypair = &BIKE_L1_R3_crypto_kem_keypair,
+ .encapsulate = &BIKE_L1_R3_crypto_kem_enc,
+ .decapsulate = &BIKE_L1_R3_crypto_kem_dec,
+};
+
const struct s2n_kem s2n_sike_p503_r1 = {
.name = "SIKEp503r1-KEM",
.kem_extension_id = TLS_PQ_KEM_EXTENSION_ID_SIKE_P503_R1,
@@ -58,18 +70,6 @@ const struct s2n_kem s2n_sike_p503_r1 = {
.decapsulate = &SIKE_P503_r1_crypto_kem_dec,
};
-const struct s2n_kem s2n_sike_p434_r2 = {
- .name = "SIKEp434r2-KEM",
- .kem_extension_id = TLS_PQ_KEM_EXTENSION_ID_SIKE_P434_R2,
- .public_key_length = SIKE_P434_R2_PUBLIC_KEY_BYTES,
- .private_key_length = SIKE_P434_R2_SECRET_KEY_BYTES,
- .shared_secret_key_length = SIKE_P434_R2_SHARED_SECRET_BYTES,
- .ciphertext_length = SIKE_P434_R2_CIPHERTEXT_BYTES,
- .generate_keypair = &SIKE_P434_r2_crypto_kem_keypair,
- .encapsulate = &SIKE_P434_r2_crypto_kem_enc,
- .decapsulate = &SIKE_P434_r2_crypto_kem_dec,
-};
-
const struct s2n_kem s2n_kyber_512_r2 = {
.name = "kyber512r2",
.kem_extension_id = TLS_PQ_KEM_EXTENSION_ID_KYBER_512_R2,
@@ -94,23 +94,49 @@ const struct s2n_kem s2n_kyber_512_90s_r2 = {
.decapsulate = &kyber_512_90s_r2_crypto_kem_dec,
};
+const struct s2n_kem s2n_kyber_512_r3 = {
+ .name = "kyber512r3",
+ .kem_extension_id = TLS_PQ_KEM_EXTENSION_ID_KYBER_512_R3,
+ .public_key_length = S2N_KYBER_512_R3_PUBLIC_KEY_BYTES,
+ .private_key_length = S2N_KYBER_512_R3_SECRET_KEY_BYTES,
+ .shared_secret_key_length = S2N_KYBER_512_R3_SHARED_SECRET_BYTES,
+ .ciphertext_length = S2N_KYBER_512_R3_CIPHERTEXT_BYTES,
+ .generate_keypair = &s2n_kyber_512_r3_crypto_kem_keypair,
+ .encapsulate = &s2n_kyber_512_r3_crypto_kem_enc,
+ .decapsulate = &s2n_kyber_512_r3_crypto_kem_dec,
+};
+
+const struct s2n_kem s2n_sike_p434_r3 = {
+ .name = "SIKEp434r3-KEM",
+ .kem_extension_id = TLS_PQ_KEM_EXTENSION_ID_SIKE_P434_R3,
+ .public_key_length = S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES,
+ .private_key_length = S2N_SIKE_P434_R3_SECRET_KEY_BYTES,
+ .shared_secret_key_length = S2N_SIKE_P434_R3_SHARED_SECRET_BYTES,
+ .ciphertext_length = S2N_SIKE_P434_R3_CIPHERTEXT_BYTES,
+ .generate_keypair = &s2n_sike_p434_r3_crypto_kem_keypair,
+ .encapsulate = &s2n_sike_p434_r3_crypto_kem_enc,
+ .decapsulate = &s2n_sike_p434_r3_crypto_kem_dec,
+};
+
/* These lists should be kept up to date with the above KEMs. Order in the lists
* does not matter. Adding a KEM to these lists will not automatically enable
* support for the KEM extension - that must be added via the KEM preferences &
* security policies. These lists are applicable only to PQ-TLS 1.2. */
const struct s2n_kem *bike_kems[] = {
&s2n_bike1_l1_r1,
- &s2n_bike1_l1_r2
+ &s2n_bike1_l1_r2,
+ &s2n_bike_l1_r3
};
const struct s2n_kem *sike_kems[] = {
&s2n_sike_p503_r1,
- &s2n_sike_p434_r2,
+ &s2n_sike_p434_r3,
};
const struct s2n_kem *kyber_kems[] = {
&s2n_kyber_512_r2,
&s2n_kyber_512_90s_r2,
+ &s2n_kyber_512_r3,
};
const struct s2n_iana_to_kem kem_mapping[3] = {
@@ -136,26 +162,25 @@ const struct s2n_iana_to_kem kem_mapping[3] = {
* community to use values in the proposed reserved range defined in
* https://tools.ietf.org/html/draft-stebila-tls-hybrid-design.
* Values for interoperability are defined in
- * https://docs.google.com/spreadsheets/d/12YarzaNv3XQNLnvDsWLlRKwtZFhRrDdWf36YlzwrPeg/edit#gid=0.
+ * https://github.com/open-quantum-safe/openssl/blob/OQS-OpenSSL_1_1_1-stable/oqs-template/oqs-kem-info.md
*
* The structure of the hybrid share is:
* size of ECC key share (2 bytes)
* || ECC key share (variable bytes)
* || size of PQ key share (2 bytes)
* || PQ key share (variable bytes) */
-const struct s2n_kem_group s2n_secp256r1_sike_p434_r2 = {
- .name = "secp256r1_sike-p434-r2",
- .iana_id = TLS_PQ_KEM_GROUP_ID_SECP256R1_SIKE_P434_R2,
+const struct s2n_kem_group s2n_secp256r1_sike_p434_r3 = {
+ .name = "secp256r1_sike-p434-r3",
+ .iana_id = TLS_PQ_KEM_GROUP_ID_SECP256R1_SIKE_P434_R3,
.client_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + SECP256R1_SHARE_SIZE) +
- (S2N_SIZE_OF_KEY_SHARE_SIZE + SIKE_P434_R2_PUBLIC_KEY_BYTES),
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES),
.server_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + SECP256R1_SHARE_SIZE) +
- (S2N_SIZE_OF_KEY_SHARE_SIZE + SIKE_P434_R2_CIPHERTEXT_BYTES),
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + S2N_SIKE_P434_R3_CIPHERTEXT_BYTES),
.curve = &s2n_ecc_curve_secp256r1,
- .kem = &s2n_sike_p434_r2,
+ .kem = &s2n_sike_p434_r3,
};
const struct s2n_kem_group s2n_secp256r1_bike1_l1_r2 = {
- /* The name string follows the convention in the above google doc */
.name = "secp256r1_bike-1l1fo-r2",
.iana_id = TLS_PQ_KEM_GROUP_ID_SECP256R1_BIKE1_L1_R2,
.client_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + SECP256R1_SHARE_SIZE) +
@@ -177,20 +202,41 @@ const struct s2n_kem_group s2n_secp256r1_kyber_512_r2 = {
.kem = &s2n_kyber_512_r2,
};
+const struct s2n_kem_group s2n_secp256r1_bike_l1_r3 = {
+ .name = "secp256r1_bike-l1-r3",
+ .iana_id = TLS_PQ_KEM_GROUP_ID_SECP256R1_BIKE_L1_R3,
+ .client_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + SECP256R1_SHARE_SIZE) +
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + BIKE_L1_R3_PUBLIC_KEY_BYTES),
+ .server_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + SECP256R1_SHARE_SIZE) +
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + BIKE_L1_R3_CIPHERTEXT_BYTES),
+ .curve = &s2n_ecc_curve_secp256r1,
+ .kem = &s2n_bike_l1_r3,
+};
+
+const struct s2n_kem_group s2n_secp256r1_kyber_512_r3 = {
+ .name = "secp256r1_kyber-512-r3",
+ .iana_id = TLS_PQ_KEM_GROUP_ID_SECP256R1_KYBER_512_R3,
+ .client_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + SECP256R1_SHARE_SIZE) +
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + S2N_KYBER_512_R3_PUBLIC_KEY_BYTES),
+ .server_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + SECP256R1_SHARE_SIZE) +
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + S2N_KYBER_512_R3_CIPHERTEXT_BYTES),
+ .curve = &s2n_ecc_curve_secp256r1,
+ .kem = &s2n_kyber_512_r3,
+};
+
#if EVP_APIS_SUPPORTED
-const struct s2n_kem_group s2n_x25519_sike_p434_r2 = {
- .name = "x25519_sike-p434-r2",
- .iana_id = TLS_PQ_KEM_GROUP_ID_X25519_SIKE_P434_R2,
+const struct s2n_kem_group s2n_x25519_sike_p434_r3 = {
+ .name = "x25519_sike-p434-r3",
+ .iana_id = TLS_PQ_KEM_GROUP_ID_X25519_SIKE_P434_R3,
.client_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + X25519_SHARE_SIZE) +
- (S2N_SIZE_OF_KEY_SHARE_SIZE + SIKE_P434_R2_PUBLIC_KEY_BYTES),
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES),
.server_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + X25519_SHARE_SIZE) +
- (S2N_SIZE_OF_KEY_SHARE_SIZE + SIKE_P434_R2_CIPHERTEXT_BYTES),
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + S2N_SIKE_P434_R3_CIPHERTEXT_BYTES),
.curve = &s2n_ecc_curve_x25519,
- .kem = &s2n_sike_p434_r2,
+ .kem = &s2n_sike_p434_r3,
};
const struct s2n_kem_group s2n_x25519_bike1_l1_r2 = {
- /* The name string follows the convention in the above google doc */
.name = "x25519_bike-1l1fo-r2",
.iana_id = TLS_PQ_KEM_GROUP_ID_X25519_BIKE1_L1_R2,
.client_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + X25519_SHARE_SIZE) +
@@ -211,28 +257,70 @@ const struct s2n_kem_group s2n_x25519_kyber_512_r2 = {
.curve = &s2n_ecc_curve_x25519,
.kem = &s2n_kyber_512_r2,
};
+
+const struct s2n_kem_group s2n_x25519_bike_l1_r3 = {
+ .name = "x25519_bike-l1-r3",
+ .iana_id = TLS_PQ_KEM_GROUP_ID_X25519_BIKE_L1_R3,
+ .client_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + X25519_SHARE_SIZE) +
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + BIKE_L1_R3_PUBLIC_KEY_BYTES),
+ .server_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + X25519_SHARE_SIZE) +
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + BIKE_L1_R3_CIPHERTEXT_BYTES),
+ .curve = &s2n_ecc_curve_x25519,
+ .kem = &s2n_bike_l1_r3,
+};
+
+const struct s2n_kem_group s2n_x25519_kyber_512_r3 = {
+ .name = "x25519_kyber-512-r3",
+ .iana_id = TLS_PQ_KEM_GROUP_ID_X25519_KYBER_512_R3,
+ .client_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + X25519_SHARE_SIZE) +
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + S2N_KYBER_512_R3_PUBLIC_KEY_BYTES),
+ .server_share_size = (S2N_SIZE_OF_KEY_SHARE_SIZE + X25519_SHARE_SIZE) +
+ (S2N_SIZE_OF_KEY_SHARE_SIZE + S2N_KYBER_512_R3_CIPHERTEXT_BYTES),
+ .curve = &s2n_ecc_curve_x25519,
+ .kem = &s2n_kyber_512_r3,
+};
+
+
#else
-const struct s2n_kem_group s2n_x25519_sike_p434_r2 = { 0 };
const struct s2n_kem_group s2n_x25519_bike1_l1_r2 = { 0 };
const struct s2n_kem_group s2n_x25519_kyber_512_r2 = { 0 };
+const struct s2n_kem_group s2n_x25519_sike_p434_r3 = { 0 };
+const struct s2n_kem_group s2n_x25519_bike_l1_r3 = { 0 };
+const struct s2n_kem_group s2n_x25519_kyber_512_r3 = { 0 };
#endif
+const struct s2n_kem_group* ALL_SUPPORTED_KEM_GROUPS[S2N_SUPPORTED_KEM_GROUPS_COUNT] = {
+ &s2n_secp256r1_bike_l1_r3,
+ &s2n_secp256r1_sike_p434_r3,
+ &s2n_secp256r1_kyber_512_r3,
+ &s2n_secp256r1_bike1_l1_r2,
+ &s2n_secp256r1_kyber_512_r2,
+/* x25519 based tls13_kem_groups require EVP_APIS_SUPPORTED */
+#if EVP_APIS_SUPPORTED
+ &s2n_x25519_bike_l1_r3,
+ &s2n_x25519_sike_p434_r3,
+ &s2n_x25519_kyber_512_r3,
+ &s2n_x25519_bike1_l1_r2,
+ &s2n_x25519_kyber_512_r2
+#endif
+};
+
/* Helper safety macro to call the NIST PQ KEM functions. The NIST
* functions may return any non-zero value to indicate failure. */
-#define GUARD_PQ_AS_RESULT(x) ENSURE((x) == 0, S2N_ERR_PQ_CRYPTO)
+#define GUARD_PQ_AS_RESULT(x) RESULT_ENSURE((x) == 0, S2N_ERR_PQ_CRYPTO)
S2N_RESULT s2n_kem_generate_keypair(struct s2n_kem_params *kem_params)
{
- ENSURE_REF(kem_params);
- ENSURE_REF(kem_params->kem);
+ RESULT_ENSURE_REF(kem_params);
+ RESULT_ENSURE_REF(kem_params->kem);
const struct s2n_kem *kem = kem_params->kem;
- ENSURE_REF(kem->generate_keypair);
+ RESULT_ENSURE_REF(kem->generate_keypair);
- ENSURE_REF(kem_params->public_key.data);
- ENSURE(kem_params->public_key.size == kem->public_key_length, S2N_ERR_SAFETY);
+ RESULT_ENSURE_REF(kem_params->public_key.data);
+ RESULT_ENSURE(kem_params->public_key.size == kem->public_key_length, S2N_ERR_SAFETY);
/* Need to save the private key for decapsulation */
- GUARD_AS_RESULT(s2n_alloc(&kem_params->private_key, kem->private_key_length));
+ RESULT_GUARD_POSIX(s2n_realloc(&kem_params->private_key, kem->private_key_length));
GUARD_PQ_AS_RESULT(kem->generate_keypair(kem_params->public_key.data, kem_params->private_key.data));
return S2N_RESULT_OK;
@@ -240,20 +328,20 @@ S2N_RESULT s2n_kem_generate_keypair(struct s2n_kem_params *kem_params)
S2N_RESULT s2n_kem_encapsulate(struct s2n_kem_params *kem_params, struct s2n_blob *ciphertext)
{
- ENSURE_REF(kem_params);
- ENSURE_REF(kem_params->kem);
+ RESULT_ENSURE_REF(kem_params);
+ RESULT_ENSURE_REF(kem_params->kem);
const struct s2n_kem *kem = kem_params->kem;
- ENSURE_REF(kem->encapsulate);
+ RESULT_ENSURE_REF(kem->encapsulate);
- ENSURE(kem_params->public_key.size == kem->public_key_length, S2N_ERR_SAFETY);
- ENSURE_REF(kem_params->public_key.data);
+ RESULT_ENSURE(kem_params->public_key.size == kem->public_key_length, S2N_ERR_SAFETY);
+ RESULT_ENSURE_REF(kem_params->public_key.data);
- ENSURE_REF(ciphertext);
- ENSURE_REF(ciphertext->data);
- ENSURE(ciphertext->size == kem->ciphertext_length, S2N_ERR_SAFETY);
+ RESULT_ENSURE_REF(ciphertext);
+ RESULT_ENSURE_REF(ciphertext->data);
+ RESULT_ENSURE(ciphertext->size == kem->ciphertext_length, S2N_ERR_SAFETY);
/* Need to save the shared secret for key derivation */
- GUARD_AS_RESULT(s2n_alloc(&(kem_params->shared_secret), kem->shared_secret_key_length));
+ RESULT_GUARD_POSIX(s2n_alloc(&(kem_params->shared_secret), kem->shared_secret_key_length));
GUARD_PQ_AS_RESULT(kem->encapsulate(ciphertext->data, kem_params->shared_secret.data, kem_params->public_key.data));
return S2N_RESULT_OK;
@@ -261,20 +349,20 @@ S2N_RESULT s2n_kem_encapsulate(struct s2n_kem_params *kem_params, struct s2n_blo
S2N_RESULT s2n_kem_decapsulate(struct s2n_kem_params *kem_params, const struct s2n_blob *ciphertext)
{
- ENSURE_REF(kem_params);
- ENSURE_REF(kem_params->kem);
+ RESULT_ENSURE_REF(kem_params);
+ RESULT_ENSURE_REF(kem_params->kem);
const struct s2n_kem *kem = kem_params->kem;
- ENSURE_REF(kem->decapsulate);
+ RESULT_ENSURE_REF(kem->decapsulate);
- ENSURE(kem_params->private_key.size == kem->private_key_length, S2N_ERR_SAFETY);
- ENSURE_REF(kem_params->private_key.data);
+ RESULT_ENSURE(kem_params->private_key.size == kem->private_key_length, S2N_ERR_SAFETY);
+ RESULT_ENSURE_REF(kem_params->private_key.data);
- ENSURE_REF(ciphertext);
- ENSURE_REF(ciphertext->data);
- ENSURE(ciphertext->size == kem->ciphertext_length, S2N_ERR_SAFETY);
+ RESULT_ENSURE_REF(ciphertext);
+ RESULT_ENSURE_REF(ciphertext->data);
+ RESULT_ENSURE(ciphertext->size == kem->ciphertext_length, S2N_ERR_SAFETY);
/* Need to save the shared secret for key derivation */
- GUARD_AS_RESULT(s2n_alloc(&(kem_params->shared_secret), kem->shared_secret_key_length));
+ RESULT_GUARD_POSIX(s2n_alloc(&(kem_params->shared_secret), kem->shared_secret_key_length));
GUARD_PQ_AS_RESULT(kem->decapsulate(kem_params->shared_secret.data, ciphertext->data, kem_params->private_key.data));
return S2N_RESULT_OK;
@@ -283,7 +371,7 @@ S2N_RESULT s2n_kem_decapsulate(struct s2n_kem_params *kem_params, const struct s
static int s2n_kem_check_kem_compatibility(const uint8_t iana_value[S2N_TLS_CIPHER_SUITE_LEN], const struct s2n_kem *candidate_kem,
uint8_t *kem_is_compatible) {
const struct s2n_iana_to_kem *compatible_kems = NULL;
- GUARD(s2n_cipher_suite_to_kem(iana_value, &compatible_kems));
+ POSIX_GUARD(s2n_cipher_suite_to_kem(iana_value, &compatible_kems));
for (uint8_t i = 0; i < compatible_kems->kem_count; i++) {
if (candidate_kem->kem_extension_id == compatible_kems->kems[i]->kem_extension_id) {
@@ -299,8 +387,8 @@ static int s2n_kem_check_kem_compatibility(const uint8_t iana_value[S2N_TLS_CIPH
int s2n_choose_kem_with_peer_pref_list(const uint8_t iana_value[S2N_TLS_CIPHER_SUITE_LEN], struct s2n_blob *client_kem_ids,
const struct s2n_kem *server_kem_pref_list[], const uint8_t num_server_supported_kems, const struct s2n_kem **chosen_kem) {
struct s2n_stuffer client_kem_ids_stuffer = {0};
- GUARD(s2n_stuffer_init(&client_kem_ids_stuffer, client_kem_ids));
- GUARD(s2n_stuffer_write(&client_kem_ids_stuffer, client_kem_ids));
+ POSIX_GUARD(s2n_stuffer_init(&client_kem_ids_stuffer, client_kem_ids));
+ POSIX_GUARD(s2n_stuffer_write(&client_kem_ids_stuffer, client_kem_ids));
/* Each KEM ID is 2 bytes */
uint8_t num_client_candidate_kems = client_kem_ids->size / 2;
@@ -309,7 +397,7 @@ int s2n_choose_kem_with_peer_pref_list(const uint8_t iana_value[S2N_TLS_CIPHER_S
const struct s2n_kem *candidate_server_kem = (server_kem_pref_list[i]);
uint8_t server_kem_is_compatible = 0;
- GUARD(s2n_kem_check_kem_compatibility(iana_value, candidate_server_kem, &server_kem_is_compatible));
+ POSIX_GUARD(s2n_kem_check_kem_compatibility(iana_value, candidate_server_kem, &server_kem_is_compatible));
if (!server_kem_is_compatible) {
continue;
@@ -317,25 +405,25 @@ int s2n_choose_kem_with_peer_pref_list(const uint8_t iana_value[S2N_TLS_CIPHER_S
for (uint8_t j = 0; j < num_client_candidate_kems; j++) {
kem_extension_size candidate_client_kem_id;
- GUARD(s2n_stuffer_read_uint16(&client_kem_ids_stuffer, &candidate_client_kem_id));
+ POSIX_GUARD(s2n_stuffer_read_uint16(&client_kem_ids_stuffer, &candidate_client_kem_id));
if (candidate_server_kem->kem_extension_id == candidate_client_kem_id) {
*chosen_kem = candidate_server_kem;
return S2N_SUCCESS;
}
}
- GUARD(s2n_stuffer_reread(&client_kem_ids_stuffer));
+ POSIX_GUARD(s2n_stuffer_reread(&client_kem_ids_stuffer));
}
/* Client and server did not propose any mutually supported KEMs compatible with the ciphersuite */
- S2N_ERROR(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
+ POSIX_BAIL(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
}
int s2n_choose_kem_without_peer_pref_list(const uint8_t iana_value[S2N_TLS_CIPHER_SUITE_LEN], const struct s2n_kem *server_kem_pref_list[],
const uint8_t num_server_supported_kems, const struct s2n_kem **chosen_kem) {
for (uint8_t i = 0; i < num_server_supported_kems; i++) {
uint8_t kem_is_compatible = 0;
- GUARD(s2n_kem_check_kem_compatibility(iana_value, server_kem_pref_list[i], &kem_is_compatible));
+ POSIX_GUARD(s2n_kem_check_kem_compatibility(iana_value, server_kem_pref_list[i], &kem_is_compatible));
if (kem_is_compatible) {
*chosen_kem = server_kem_pref_list[i];
return S2N_SUCCESS;
@@ -343,23 +431,23 @@ int s2n_choose_kem_without_peer_pref_list(const uint8_t iana_value[S2N_TLS_CIPHE
}
/* The server preference list did not contain any KEM extensions compatible with the ciphersuite */
- S2N_ERROR(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
+ POSIX_BAIL(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
}
int s2n_kem_free(struct s2n_kem_params *kem_params)
{
if (kem_params != NULL) {
- GUARD(s2n_blob_zeroize_free(&kem_params->private_key));
- GUARD(s2n_blob_zeroize_free(&kem_params->public_key));
- GUARD(s2n_blob_zeroize_free(&kem_params->shared_secret));
+ POSIX_GUARD(s2n_blob_zeroize_free(&kem_params->private_key));
+ POSIX_GUARD(s2n_blob_zeroize_free(&kem_params->public_key));
+ POSIX_GUARD(s2n_blob_zeroize_free(&kem_params->shared_secret));
}
return S2N_SUCCESS;
}
int s2n_kem_group_free(struct s2n_kem_group_params *kem_group_params) {
if (kem_group_params != NULL) {
- GUARD(s2n_kem_free(&kem_group_params->kem_params));
- GUARD(s2n_ecc_evp_params_free(&kem_group_params->ecc_params));
+ POSIX_GUARD(s2n_kem_free(&kem_group_params->kem_params));
+ POSIX_GUARD(s2n_ecc_evp_params_free(&kem_group_params->ecc_params));
}
return S2N_SUCCESS;
}
@@ -372,7 +460,7 @@ int s2n_cipher_suite_to_kem(const uint8_t iana_value[S2N_TLS_CIPHER_SUITE_LEN],
return S2N_SUCCESS;
}
}
- S2N_ERROR(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
+ POSIX_BAIL(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
}
int s2n_get_kem_from_extension_id(kem_extension_size kem_id, const struct s2n_kem **kem) {
@@ -388,26 +476,26 @@ int s2n_get_kem_from_extension_id(kem_extension_size kem_id, const struct s2n_ke
}
}
- S2N_ERROR(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
+ POSIX_BAIL(S2N_ERR_KEM_UNSUPPORTED_PARAMS);
}
int s2n_kem_send_public_key(struct s2n_stuffer *out, struct s2n_kem_params *kem_params) {
- notnull_check(out);
- notnull_check(kem_params);
- notnull_check(kem_params->kem);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(kem_params);
+ POSIX_ENSURE_REF(kem_params->kem);
const struct s2n_kem *kem = kem_params->kem;
- GUARD(s2n_stuffer_write_uint16(out, kem->public_key_length));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, kem->public_key_length));
/* We don't need to store the public key after sending it.
* We write it directly to *out. */
kem_params->public_key.data = s2n_stuffer_raw_write(out, kem->public_key_length);
- notnull_check(kem_params->public_key.data);
+ POSIX_ENSURE_REF(kem_params->public_key.data);
kem_params->public_key.size = kem->public_key_length;
/* Saves the private key in kem_params */
- GUARD_AS_POSIX(s2n_kem_generate_keypair(kem_params));
+ POSIX_GUARD_RESULT(s2n_kem_generate_keypair(kem_params));
/* After using s2n_stuffer_raw_write() above to write the public
* key to the stuffer, we want to ensure that kem_params->public_key.data
@@ -420,61 +508,61 @@ int s2n_kem_send_public_key(struct s2n_stuffer *out, struct s2n_kem_params *kem_
}
int s2n_kem_recv_public_key(struct s2n_stuffer *in, struct s2n_kem_params *kem_params) {
- notnull_check(in);
- notnull_check(kem_params);
- notnull_check(kem_params->kem);
+ POSIX_ENSURE_REF(in);
+ POSIX_ENSURE_REF(kem_params);
+ POSIX_ENSURE_REF(kem_params->kem);
const struct s2n_kem *kem = kem_params->kem;
kem_public_key_size public_key_length;
- GUARD(s2n_stuffer_read_uint16(in, &public_key_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &public_key_length));
S2N_ERROR_IF(public_key_length != kem->public_key_length, S2N_ERR_BAD_MESSAGE);
/* Alloc memory for the public key; the peer receiving it will need it
* later during the handshake to encapsulate the shared secret. */
- GUARD(s2n_alloc(&(kem_params->public_key), public_key_length));
- GUARD(s2n_stuffer_read_bytes(in, kem_params->public_key.data, public_key_length));
+ POSIX_GUARD(s2n_alloc(&(kem_params->public_key), public_key_length));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, kem_params->public_key.data, public_key_length));
return S2N_SUCCESS;
}
int s2n_kem_send_ciphertext(struct s2n_stuffer *out, struct s2n_kem_params *kem_params) {
- notnull_check(out);
- notnull_check(kem_params);
- notnull_check(kem_params->kem);
- notnull_check(kem_params->public_key.data);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE_REF(kem_params);
+ POSIX_ENSURE_REF(kem_params->kem);
+ POSIX_ENSURE_REF(kem_params->public_key.data);
const struct s2n_kem *kem = kem_params->kem;
- GUARD(s2n_stuffer_write_uint16(out, kem->ciphertext_length));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, kem->ciphertext_length));
/* Ciphertext will get written to *out */
struct s2n_blob ciphertext = {.data = s2n_stuffer_raw_write(out, kem->ciphertext_length), .size = kem->ciphertext_length};
- notnull_check(ciphertext.data);
+ POSIX_ENSURE_REF(ciphertext.data);
/* Saves the shared secret in kem_params */
- GUARD_AS_POSIX(s2n_kem_encapsulate(kem_params, &ciphertext));
+ POSIX_GUARD_RESULT(s2n_kem_encapsulate(kem_params, &ciphertext));
return S2N_SUCCESS;
}
int s2n_kem_recv_ciphertext(struct s2n_stuffer *in, struct s2n_kem_params *kem_params) {
- notnull_check(in);
- notnull_check(kem_params);
- notnull_check(kem_params->kem);
- notnull_check(kem_params->private_key.data);
+ POSIX_ENSURE_REF(in);
+ POSIX_ENSURE_REF(kem_params);
+ POSIX_ENSURE_REF(kem_params->kem);
+ POSIX_ENSURE_REF(kem_params->private_key.data);
const struct s2n_kem *kem = kem_params->kem;
kem_ciphertext_key_size ciphertext_length;
- GUARD(s2n_stuffer_read_uint16(in, &ciphertext_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &ciphertext_length));
S2N_ERROR_IF(ciphertext_length != kem->ciphertext_length, S2N_ERR_BAD_MESSAGE);
const struct s2n_blob ciphertext = {.data = s2n_stuffer_raw_read(in, ciphertext_length), .size = ciphertext_length};
- notnull_check(ciphertext.data);
+ POSIX_ENSURE_REF(ciphertext.data);
/* Saves the shared secret in kem_params */
- GUARD_AS_POSIX(s2n_kem_decapsulate(kem_params, &ciphertext));
+ POSIX_GUARD_RESULT(s2n_kem_decapsulate(kem_params, &ciphertext));
return S2N_SUCCESS;
}
@@ -483,27 +571,35 @@ int s2n_kem_recv_ciphertext(struct s2n_stuffer *in, struct s2n_kem_params *kem_p
/* If S2N_NO_PQ was defined at compile time, the PQ KEM code will have been entirely excluded
* from compilation. We define stubs of these functions here to error if they are called. */
/* sikep503r1 */
-int SIKE_P503_r1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int SIKE_P503_r1_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int SIKE_P503_r1_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-/* sikep434r2 */
-int SIKE_P434_r2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int SIKE_P434_r2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int SIKE_P434_r2_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
+int SIKE_P503_r1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int SIKE_P503_r1_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int SIKE_P503_r1_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
/* bike1l1r1 */
-int BIKE1_L1_R1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int BIKE1_L1_R1_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int BIKE1_L1_R1_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
+int BIKE1_L1_R1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int BIKE1_L1_R1_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int BIKE1_L1_R1_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
/* bike1l1r2*/
-int BIKE1_L1_R2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int BIKE1_L1_R2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int BIKE1_L1_R2_crypto_kem_dec(OUT unsigned char * ss, IN const unsigned char *ct, IN const unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
+int BIKE1_L1_R2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int BIKE1_L1_R2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int BIKE1_L1_R2_crypto_kem_dec(OUT unsigned char * ss, IN const unsigned char *ct, IN const unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+/* bike1l1r3*/
+int BIKE_L1_R3_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int BIKE_L1_R3_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int BIKE_L1_R3_crypto_kem_dec(OUT unsigned char * ss, IN const unsigned char *ct, IN const unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
/* kyber512r2 */
-int kyber_512_r2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int kyber_512_r2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int kyber_512_r2_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
+int kyber_512_r2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int kyber_512_r2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int kyber_512_r2_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
/* kyber512r2 90's version*/
-int kyber_512_90s_r2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int kyber_512_90s_r2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
-int kyber_512_90s_r2_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { BAIL_POSIX(S2N_ERR_UNIMPLEMENTED); }
+int kyber_512_90s_r2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int kyber_512_90s_r2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int kyber_512_90s_r2_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+/* kyber512r3 */
+int s2n_kyber_512_r3_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int s2n_kyber_512_r3_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int s2n_kyber_512_r3_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+/* sikep434r3 */
+int s2n_sike_p434_r3_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int s2n_sike_p434_r3_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
+int s2n_sike_p434_r3_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); }
#endif
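
Taken together, the send/recv helpers in this file implement the PQ-hybrid TLS 1.2 key-exchange flow. A hedged sketch of how the surrounding ServerKeyExchange / ClientKeyExchange code is expected to drive them (the stuffer and kem_params variables are assumptions standing in for the real connection state):

    /* Server: generate a keypair, write the public key, keep the private key. */
    POSIX_GUARD(s2n_kem_send_public_key(out, &server_kem_params));
    /* Client: read the public key, encapsulate, write the ciphertext,
     * and keep the shared secret. */
    POSIX_GUARD(s2n_kem_recv_public_key(in, &client_kem_params));
    POSIX_GUARD(s2n_kem_send_ciphertext(out, &client_kem_params));
    /* Server: read the ciphertext and decapsulate to the same shared secret. */
    POSIX_GUARD(s2n_kem_recv_ciphertext(in, &server_kem_params));
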
diff --git a/contrib/restricted/aws/s2n/tls/s2n_kem.h b/contrib/restricted/aws/s2n/tls/s2n_kem.h
index ddea9d09ca..ca6ab90cf3 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_kem.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_kem.h
@@ -73,25 +73,37 @@ struct s2n_kem_group_params {
extern const struct s2n_kem s2n_bike1_l1_r1;
extern const struct s2n_kem s2n_bike1_l1_r2;
+extern const struct s2n_kem s2n_bike_l1_r3;
extern const struct s2n_kem s2n_sike_p503_r1;
-extern const struct s2n_kem s2n_sike_p434_r2;
extern const struct s2n_kem s2n_kyber_512_r2;
extern const struct s2n_kem s2n_kyber_512_90s_r2;
+extern const struct s2n_kem s2n_kyber_512_r3;
+extern const struct s2n_kem s2n_sike_p434_r3;
/* x25519 based tls13_kem_groups require EVP_APIS_SUPPORTED */
#if EVP_APIS_SUPPORTED
-#define S2N_SUPPORTED_KEM_GROUPS_COUNT 6
+#define S2N_SUPPORTED_KEM_GROUPS_COUNT 10
#else
-#define S2N_SUPPORTED_KEM_GROUPS_COUNT 3
+#define S2N_SUPPORTED_KEM_GROUPS_COUNT 5
#endif
-extern const struct s2n_kem_group s2n_secp256r1_sike_p434_r2;
+extern const struct s2n_kem_group* ALL_SUPPORTED_KEM_GROUPS[S2N_SUPPORTED_KEM_GROUPS_COUNT];
+
+/* secp256r1 KEM Groups */
+extern const struct s2n_kem_group s2n_secp256r1_bike_l1_r3;
+extern const struct s2n_kem_group s2n_secp256r1_sike_p434_r3;
+extern const struct s2n_kem_group s2n_secp256r1_kyber_512_r3;
extern const struct s2n_kem_group s2n_secp256r1_bike1_l1_r2;
extern const struct s2n_kem_group s2n_secp256r1_kyber_512_r2;
-extern const struct s2n_kem_group s2n_x25519_sike_p434_r2;
+
+/* x25519 KEM Groups */
+extern const struct s2n_kem_group s2n_x25519_bike_l1_r3;
+extern const struct s2n_kem_group s2n_x25519_sike_p434_r3;
+extern const struct s2n_kem_group s2n_x25519_kyber_512_r3;
extern const struct s2n_kem_group s2n_x25519_bike1_l1_r2;
extern const struct s2n_kem_group s2n_x25519_kyber_512_r2;
+
extern S2N_RESULT s2n_kem_generate_keypair(struct s2n_kem_params *kem_params);
extern S2N_RESULT s2n_kem_encapsulate(struct s2n_kem_params *kem_params, struct s2n_blob *ciphertext);
extern S2N_RESULT s2n_kem_decapsulate(struct s2n_kem_params *kem_params, const struct s2n_blob *ciphertext);
@@ -141,16 +153,7 @@ int SIKE_P503_r1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk
int SIKE_P503_r1_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk);
int SIKE_P503_r1_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk);
-/* sikep434r2 */
-#define SIKE_P434_R2_PUBLIC_KEY_BYTES 330
-#define SIKE_P434_R2_SECRET_KEY_BYTES 374
-#define SIKE_P434_R2_CIPHERTEXT_BYTES 346
-#define SIKE_P434_R2_SHARED_SECRET_BYTES 16
-int SIKE_P434_r2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk);
-int SIKE_P434_r2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk);
-int SIKE_P434_r2_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk);
-
-/* bike1l1r1 */
+/* bike1_l1_r1 */
#define BIKE1_L1_R1_SECRET_KEY_BYTES 3110
#define BIKE1_L1_R1_PUBLIC_KEY_BYTES 2542
#define BIKE1_L1_R1_CIPHERTEXT_BYTES 2542
@@ -159,7 +162,7 @@ int BIKE1_L1_R1_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
int BIKE1_L1_R1_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk);
int BIKE1_L1_R1_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk);
-/* bike1l1r2 */
+/* bike1_l1_r2 */
#define BIKE1_L1_R2_SECRET_KEY_BYTES 6460
#define BIKE1_L1_R2_PUBLIC_KEY_BYTES 2946
#define BIKE1_L1_R2_CIPHERTEXT_BYTES 2946
@@ -168,6 +171,15 @@ int BIKE1_L1_R2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
int BIKE1_L1_R2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk);
int BIKE1_L1_R2_crypto_kem_dec(OUT unsigned char * ss, IN const unsigned char *ct, IN const unsigned char *sk);
+/* bike_l1_r3 */
+#define BIKE_L1_R3_SECRET_KEY_BYTES 5223
+#define BIKE_L1_R3_PUBLIC_KEY_BYTES 1541
+#define BIKE_L1_R3_CIPHERTEXT_BYTES 1573
+#define BIKE_L1_R3_SHARED_SECRET_BYTES 32
+int BIKE_L1_R3_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk);
+int BIKE_L1_R3_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk);
+int BIKE_L1_R3_crypto_kem_dec(OUT unsigned char * ss, IN const unsigned char *ct, IN const unsigned char *sk);
+
/* kyber512r2 (the defined constants are identical for both regular and 90's version) */
#define KYBER_512_R2_PUBLIC_KEY_BYTES 800
#define KYBER_512_R2_SECRET_KEY_BYTES 1632
@@ -179,3 +191,21 @@ int kyber_512_r2_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *c
int kyber_512_90s_r2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk);
int kyber_512_90s_r2_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk);
int kyber_512_90s_r2_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk);
+
+/* kyber512r3 */
+#define S2N_KYBER_512_R3_PUBLIC_KEY_BYTES 800
+#define S2N_KYBER_512_R3_SECRET_KEY_BYTES 1632
+#define S2N_KYBER_512_R3_CIPHERTEXT_BYTES 768
+#define S2N_KYBER_512_R3_SHARED_SECRET_BYTES 32
+int s2n_kyber_512_r3_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk);
+int s2n_kyber_512_r3_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk);
+int s2n_kyber_512_r3_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk);
+
+/* sikep434r3 */
+#define S2N_SIKE_P434_R3_PUBLIC_KEY_BYTES 330
+#define S2N_SIKE_P434_R3_SECRET_KEY_BYTES 374
+#define S2N_SIKE_P434_R3_CIPHERTEXT_BYTES 346
+#define S2N_SIKE_P434_R3_SHARED_SECRET_BYTES 16
+int s2n_sike_p434_r3_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk);
+int s2n_sike_p434_r3_crypto_kem_enc(OUT unsigned char *ct, OUT unsigned char *ss, IN const unsigned char *pk);
+int s2n_sike_p434_r3_crypto_kem_dec(OUT unsigned char *ss, IN const unsigned char *ct, IN const unsigned char *sk);
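
As a hedged worked example of the hybrid share sizes these constants feed into (see the layout comment in s2n_kem.c above), assuming the conventional 2-byte S2N_SIZE_OF_KEY_SHARE_SIZE prefix and a 65-byte uncompressed secp256r1 point for SECP256R1_SHARE_SIZE:

    secp256r1_kyber-512-r3 client share = (2 + 65) + (2 + 800) = 869 bytes
    secp256r1_kyber-512-r3 server share = (2 + 65) + (2 + 768) = 837 bytes
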
diff --git a/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.c b/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.c
index 3e3fa71323..24ce8cb49e 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.c
@@ -21,18 +21,35 @@ const struct s2n_kem *pq_kems_r1[2] = {
&s2n_sike_p503_r1,
};
-/* Extension list for round 2 and round 1 PQ KEMs, in order of preference */
+/* Extension list for round 2 and round 1 PQ KEMs, in order of preference.
+ * s2n_sike_p434_r3 is compatible with, and has replaced, s2n_sike_p434_r2. */
const struct s2n_kem *pq_kems_r2r1[4] = {
&s2n_bike1_l1_r2,
- &s2n_sike_p434_r2,
+ &s2n_sike_p434_r3,
&s2n_bike1_l1_r1,
&s2n_sike_p503_r1,
};
+/* s2n_sike_p434_r3 is compatible with, and has replaced, s2n_sike_p434_r2. */
const struct s2n_kem *pq_kems_r2r1_2020_07[5] = {
&s2n_kyber_512_r2,
&s2n_bike1_l1_r2,
- &s2n_sike_p434_r2,
+ &s2n_sike_p434_r3,
+ &s2n_bike1_l1_r1,
+ &s2n_sike_p503_r1,
+};
+
+const struct s2n_kem *pq_kems_r3r2r1_2021_05[7] = {
+ /* Round 3 Algorithms */
+ &s2n_kyber_512_r3,
+ &s2n_bike_l1_r3,
+
+ /* Round 2 Algorithms */
+ &s2n_kyber_512_r2,
+ &s2n_bike1_l1_r2,
+ &s2n_sike_p434_r3,
+
+ /* Round 1 Algorithms */
&s2n_bike1_l1_r1,
&s2n_sike_p503_r1,
};
@@ -43,24 +60,48 @@ const struct s2n_kem *pq_kems_sike_r1[1] = {
};
/* Extension list for SIKE P434 Round 2 and SIKE P503 Round 1 only (for testing),
- * in order of preference */
+ * in order of preference. s2n_sike_p434_r3 is compatible with, and has replaced,
+ * s2n_sike_p434_r2. */
const struct s2n_kem *pq_kems_sike_r2r1[2] = {
- &s2n_sike_p434_r2,
+ &s2n_sike_p434_r3,
&s2n_sike_p503_r1,
};
+/* sike_p434_r3 has replaced sike_p434_r2 in all KEM groups */
const struct s2n_kem_group *pq_kem_groups_r2[] = {
#if EVP_APIS_SUPPORTED
&s2n_x25519_kyber_512_r2,
&s2n_secp256r1_kyber_512_r2,
&s2n_x25519_bike1_l1_r2,
&s2n_secp256r1_bike1_l1_r2,
- &s2n_x25519_sike_p434_r2,
- &s2n_secp256r1_sike_p434_r2,
+ &s2n_x25519_sike_p434_r3,
+ &s2n_secp256r1_sike_p434_r3,
+#else
+ &s2n_secp256r1_kyber_512_r2,
+ &s2n_secp256r1_bike1_l1_r2,
+ &s2n_secp256r1_sike_p434_r3,
+#endif
+};
+
+
+const struct s2n_kem_group *pq_kem_groups_r3r2[] = {
+#if EVP_APIS_SUPPORTED
+ &s2n_x25519_kyber_512_r3,
+ &s2n_secp256r1_kyber_512_r3,
+ &s2n_x25519_bike_l1_r3,
+ &s2n_secp256r1_bike_l1_r3,
+ &s2n_x25519_kyber_512_r2,
+ &s2n_secp256r1_kyber_512_r2,
+ &s2n_x25519_bike1_l1_r2,
+ &s2n_secp256r1_bike1_l1_r2,
+ &s2n_x25519_sike_p434_r3,
+ &s2n_secp256r1_sike_p434_r3,
#else
+ &s2n_secp256r1_kyber_512_r3,
+ &s2n_secp256r1_bike_l1_r3,
&s2n_secp256r1_kyber_512_r2,
&s2n_secp256r1_bike1_l1_r2,
- &s2n_secp256r1_sike_p434_r2,
+ &s2n_secp256r1_sike_p434_r3,
#endif
};
@@ -110,6 +151,13 @@ const struct s2n_kem_preferences kem_preferences_pq_tls_1_0_2020_12 = {
.tls13_kem_groups = pq_kem_groups_r2,
};
+const struct s2n_kem_preferences kem_preferences_pq_tls_1_0_2021_05 = {
+ .kem_count = s2n_array_len(pq_kems_r3r2r1_2021_05),
+ .kems = pq_kems_r3r2r1_2021_05,
+ .tls13_kem_group_count = s2n_array_len(pq_kem_groups_r3r2),
+ .tls13_kem_groups = pq_kem_groups_r3r2,
+};
+
const struct s2n_kem_preferences kem_preferences_null = {
.kem_count = 0,
.kems = NULL,
diff --git a/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.h b/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.h
index cad4f39c8e..73f86a10a1 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_kem_preferences.h
@@ -34,8 +34,10 @@ extern const struct s2n_kem *pq_kems_r2r1[4];
extern const struct s2n_kem *pq_kems_r2r1_2020_07[5];
extern const struct s2n_kem *pq_kems_sike_r1[1];
extern const struct s2n_kem *pq_kems_sike_r2r1[2];
+extern const struct s2n_kem *pq_kems_r3r2r1_2021_05[7];
extern const struct s2n_kem_group *pq_kem_groups_r2[];
+extern const struct s2n_kem_group *pq_kem_groups_r3r2[];
extern const struct s2n_kem_preferences kem_preferences_kms_pq_tls_1_0_2019_06;
extern const struct s2n_kem_preferences kem_preferences_kms_pq_tls_1_0_2020_02;
@@ -43,6 +45,7 @@ extern const struct s2n_kem_preferences kem_preferences_kms_pq_tls_1_0_2020_07;
extern const struct s2n_kem_preferences kem_preferences_pq_sike_test_tls_1_0_2019_11;
extern const struct s2n_kem_preferences kem_preferences_pq_sike_test_tls_1_0_2020_02;
extern const struct s2n_kem_preferences kem_preferences_pq_tls_1_0_2020_12;
+extern const struct s2n_kem_preferences kem_preferences_pq_tls_1_0_2021_05;
extern const struct s2n_kem_preferences kem_preferences_null;
bool s2n_kem_preferences_includes_tls13_kem_group(const struct s2n_kem_preferences *kem_preferences,
diff --git a/contrib/restricted/aws/s2n/tls/s2n_kex.c b/contrib/restricted/aws/s2n/tls/s2n_kex.c
index b9fb9800ad..d4429ad2f8 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_kex.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_kex.c
@@ -26,9 +26,9 @@
static S2N_RESULT s2n_check_rsa_key(const struct s2n_cipher_suite *cipher_suite, struct s2n_connection *conn, bool *is_supported)
{
- ENSURE_REF(cipher_suite);
- ENSURE_REF(conn);
- ENSURE_REF(is_supported);
+ RESULT_ENSURE_REF(cipher_suite);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(is_supported);
*is_supported = s2n_get_compatible_cert_chain_and_key(conn, S2N_PKEY_TYPE_RSA) != NULL;
@@ -37,10 +37,10 @@ static S2N_RESULT s2n_check_rsa_key(const struct s2n_cipher_suite *cipher_suite,
static S2N_RESULT s2n_check_dhe(const struct s2n_cipher_suite *cipher_suite, struct s2n_connection *conn, bool *is_supported)
{
- ENSURE_REF(cipher_suite);
- ENSURE_REF(conn);
- ENSURE_REF(conn->config);
- ENSURE_REF(is_supported);
+ RESULT_ENSURE_REF(cipher_suite);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->config);
+ RESULT_ENSURE_REF(is_supported);
*is_supported = conn->config->dhparams != NULL;
@@ -49,27 +49,27 @@ static S2N_RESULT s2n_check_dhe(const struct s2n_cipher_suite *cipher_suite, str
static S2N_RESULT s2n_check_ecdhe(const struct s2n_cipher_suite *cipher_suite, struct s2n_connection *conn, bool *is_supported)
{
- ENSURE_REF(cipher_suite);
- ENSURE_REF(conn);
- ENSURE_REF(is_supported);
+ RESULT_ENSURE_REF(cipher_suite);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(is_supported);
- *is_supported = conn->secure.server_ecc_evp_params.negotiated_curve != NULL;
+ *is_supported = conn->kex_params.server_ecc_evp_params.negotiated_curve != NULL;
return S2N_RESULT_OK;
}
static S2N_RESULT s2n_check_kem(const struct s2n_cipher_suite *cipher_suite, struct s2n_connection *conn, bool *is_supported)
{
- ENSURE_REF(cipher_suite);
- ENSURE_REF(conn);
- ENSURE_REF(is_supported);
+ RESULT_ENSURE_REF(cipher_suite);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(is_supported);
/* If any of the necessary conditions are not met, we will return early and indicate KEM is not supported. */
*is_supported = false;
const struct s2n_kem_preferences *kem_preferences = NULL;
- GUARD_AS_RESULT(s2n_connection_get_kem_preferences(conn, &kem_preferences));
- ENSURE_REF(kem_preferences);
+ RESULT_GUARD_POSIX(s2n_connection_get_kem_preferences(conn, &kem_preferences));
+ RESULT_ENSURE_REF(kem_preferences);
if (!s2n_pq_is_enabled() || kem_preferences->kem_count == 0) {
return S2N_RESULT_OK;
@@ -80,12 +80,12 @@ static S2N_RESULT s2n_check_kem(const struct s2n_cipher_suite *cipher_suite, str
return S2N_RESULT_OK;
}
- ENSURE_REF(supported_params);
+ RESULT_ENSURE_REF(supported_params);
if (supported_params->kem_count == 0) {
return S2N_RESULT_OK;
}
- struct s2n_blob *client_kem_pref_list = &(conn->secure.client_pq_kem_extension);
+ struct s2n_blob *client_kem_pref_list = &(conn->kex_params.client_pq_kem_extension);
const struct s2n_kem *chosen_kem = NULL;
if (client_kem_pref_list == NULL || client_kem_pref_list->data == NULL) {
/* If the client did not send a PQ KEM extension, then the server can pick its preferred parameter */
@@ -107,28 +107,28 @@ static S2N_RESULT s2n_check_kem(const struct s2n_cipher_suite *cipher_suite, str
static S2N_RESULT s2n_configure_kem(const struct s2n_cipher_suite *cipher_suite, struct s2n_connection *conn)
{
- ENSURE_REF(cipher_suite);
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(cipher_suite);
+ RESULT_ENSURE_REF(conn);
- ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ RESULT_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
const struct s2n_kem_preferences *kem_preferences = NULL;
- GUARD_AS_RESULT(s2n_connection_get_kem_preferences(conn, &kem_preferences));
- ENSURE_REF(kem_preferences);
+ RESULT_GUARD_POSIX(s2n_connection_get_kem_preferences(conn, &kem_preferences));
+ RESULT_ENSURE_REF(kem_preferences);
- struct s2n_blob *proposed_kems = &(conn->secure.client_pq_kem_extension);
+ struct s2n_blob *proposed_kems = &(conn->kex_params.client_pq_kem_extension);
const struct s2n_kem *chosen_kem = NULL;
if (proposed_kems == NULL || proposed_kems->data == NULL) {
/* If the client did not send a PQ KEM extension, then the server can pick its preferred parameter */
- GUARD_AS_RESULT(s2n_choose_kem_without_peer_pref_list(cipher_suite->iana_value, kem_preferences->kems,
+ RESULT_GUARD_POSIX(s2n_choose_kem_without_peer_pref_list(cipher_suite->iana_value, kem_preferences->kems,
kem_preferences->kem_count, &chosen_kem));
} else {
/* If the client did send a PQ KEM extension, then the server must find a mutually supported parameter. */
- GUARD_AS_RESULT(s2n_choose_kem_with_peer_pref_list(cipher_suite->iana_value, proposed_kems, kem_preferences->kems,
+ RESULT_GUARD_POSIX(s2n_choose_kem_with_peer_pref_list(cipher_suite->iana_value, proposed_kems, kem_preferences->kems,
kem_preferences->kem_count, &chosen_kem));
}
- conn->secure.kem_params.kem = chosen_kem;
+ conn->kex_params.kem_params.kem = chosen_kem;
return S2N_RESULT_OK;
}
@@ -139,14 +139,14 @@ static S2N_RESULT s2n_no_op_configure(const struct s2n_cipher_suite *cipher_suit
static S2N_RESULT s2n_check_hybrid_ecdhe_kem(const struct s2n_cipher_suite *cipher_suite, struct s2n_connection *conn, bool *is_supported)
{
- ENSURE_REF(cipher_suite);
- ENSURE_REF(conn);
- ENSURE_REF(is_supported);
+ RESULT_ENSURE_REF(cipher_suite);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(is_supported);
bool ecdhe_supported = false;
bool kem_supported = false;
- GUARD_RESULT(s2n_check_ecdhe(cipher_suite, conn, &ecdhe_supported));
- GUARD_RESULT(s2n_check_kem(cipher_suite, conn, &kem_supported));
+ RESULT_GUARD(s2n_check_ecdhe(cipher_suite, conn, &ecdhe_supported));
+ RESULT_GUARD(s2n_check_kem(cipher_suite, conn, &kem_supported));
*is_supported = ecdhe_supported && kem_supported;
@@ -173,7 +173,7 @@ const struct s2n_kex s2n_rsa = {
.server_key_send = NULL,
.client_key_recv = &s2n_rsa_client_key_recv,
.client_key_send = &s2n_rsa_client_key_send,
- .prf = &s2n_tls_prf_master_secret,
+ .prf = &s2n_prf_calculate_master_secret,
};
const struct s2n_kex s2n_dhe = {
@@ -185,7 +185,7 @@ const struct s2n_kex s2n_dhe = {
.server_key_send = &s2n_dhe_server_key_send,
.client_key_recv = &s2n_dhe_client_key_recv,
.client_key_send = &s2n_dhe_client_key_send,
- .prf = &s2n_tls_prf_master_secret,
+ .prf = &s2n_prf_calculate_master_secret,
};
const struct s2n_kex s2n_ecdhe = {
@@ -197,7 +197,7 @@ const struct s2n_kex s2n_ecdhe = {
.server_key_send = &s2n_ecdhe_server_key_send,
.client_key_recv = &s2n_ecdhe_client_key_recv,
.client_key_send = &s2n_ecdhe_client_key_send,
- .prf = &s2n_tls_prf_master_secret,
+ .prf = &s2n_prf_calculate_master_secret,
};
const struct s2n_kex s2n_hybrid_ecdhe_kem = {
@@ -215,33 +215,33 @@ const struct s2n_kex s2n_hybrid_ecdhe_kem = {
S2N_RESULT s2n_kex_supported(const struct s2n_cipher_suite *cipher_suite, struct s2n_connection *conn, bool *is_supported)
{
- ENSURE_REF(cipher_suite);
- ENSURE_REF(cipher_suite->key_exchange_alg);
- ENSURE_REF(cipher_suite->key_exchange_alg->connection_supported);
- ENSURE_REF(conn);
- ENSURE_REF(is_supported);
+ RESULT_ENSURE_REF(cipher_suite);
+ RESULT_ENSURE_REF(cipher_suite->key_exchange_alg);
+ RESULT_ENSURE_REF(cipher_suite->key_exchange_alg->connection_supported);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(is_supported);
- GUARD_RESULT(cipher_suite->key_exchange_alg->connection_supported(cipher_suite, conn, is_supported));
+ RESULT_GUARD(cipher_suite->key_exchange_alg->connection_supported(cipher_suite, conn, is_supported));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_configure_kex(const struct s2n_cipher_suite *cipher_suite, struct s2n_connection *conn)
{
- ENSURE_REF(cipher_suite);
- ENSURE_REF(cipher_suite->key_exchange_alg);
- ENSURE_REF(cipher_suite->key_exchange_alg->configure_connection);
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(cipher_suite);
+ RESULT_ENSURE_REF(cipher_suite->key_exchange_alg);
+ RESULT_ENSURE_REF(cipher_suite->key_exchange_alg->configure_connection);
+ RESULT_ENSURE_REF(conn);
- GUARD_RESULT(cipher_suite->key_exchange_alg->configure_connection(cipher_suite, conn));
+ RESULT_GUARD(cipher_suite->key_exchange_alg->configure_connection(cipher_suite, conn));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_kex_is_ephemeral(const struct s2n_kex *kex, bool *is_ephemeral)
{
- ENSURE_REF(kex);
- ENSURE_REF(is_ephemeral);
+ RESULT_ENSURE_REF(kex);
+ RESULT_ENSURE_REF(is_ephemeral);
*is_ephemeral = kex->is_ephemeral;
@@ -250,12 +250,12 @@ S2N_RESULT s2n_kex_is_ephemeral(const struct s2n_kex *kex, bool *is_ephemeral)
S2N_RESULT s2n_kex_server_key_recv_parse_data(const struct s2n_kex *kex, struct s2n_connection *conn, struct s2n_kex_raw_server_data *raw_server_data)
{
- ENSURE_REF(kex);
- ENSURE_REF(kex->server_key_recv_parse_data);
- ENSURE_REF(conn);
- ENSURE_REF(raw_server_data);
+ RESULT_ENSURE_REF(kex);
+ RESULT_ENSURE_REF(kex->server_key_recv_parse_data);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(raw_server_data);
- GUARD_AS_RESULT(kex->server_key_recv_parse_data(conn, raw_server_data));
+ RESULT_GUARD_POSIX(kex->server_key_recv_parse_data(conn, raw_server_data));
return S2N_RESULT_OK;
}
@@ -263,60 +263,60 @@ S2N_RESULT s2n_kex_server_key_recv_parse_data(const struct s2n_kex *kex, struct
S2N_RESULT s2n_kex_server_key_recv_read_data(const struct s2n_kex *kex, struct s2n_connection *conn, struct s2n_blob *data_to_verify,
struct s2n_kex_raw_server_data *raw_server_data)
{
- ENSURE_REF(kex);
- ENSURE_REF(kex->server_key_recv_read_data);
- ENSURE_REF(conn);
- ENSURE_REF(data_to_verify);
+ RESULT_ENSURE_REF(kex);
+ RESULT_ENSURE_REF(kex->server_key_recv_read_data);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(data_to_verify);
- GUARD_AS_RESULT(kex->server_key_recv_read_data(conn, data_to_verify, raw_server_data));
+ RESULT_GUARD_POSIX(kex->server_key_recv_read_data(conn, data_to_verify, raw_server_data));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_kex_server_key_send(const struct s2n_kex *kex, struct s2n_connection *conn, struct s2n_blob *data_to_sign)
{
- ENSURE_REF(kex);
- ENSURE_REF(kex->server_key_send);
- ENSURE_REF(conn);
- ENSURE_REF(data_to_sign);
+ RESULT_ENSURE_REF(kex);
+ RESULT_ENSURE_REF(kex->server_key_send);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(data_to_sign);
- GUARD_AS_RESULT(kex->server_key_send(conn, data_to_sign));
+ RESULT_GUARD_POSIX(kex->server_key_send(conn, data_to_sign));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_kex_client_key_recv(const struct s2n_kex *kex, struct s2n_connection *conn, struct s2n_blob *shared_key)
{
- ENSURE_REF(kex);
- ENSURE_REF(kex->client_key_recv);
- ENSURE_REF(conn);
- ENSURE_REF(shared_key);
+ RESULT_ENSURE_REF(kex);
+ RESULT_ENSURE_REF(kex->client_key_recv);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(shared_key);
- GUARD_AS_RESULT(kex->client_key_recv(conn, shared_key));
+ RESULT_GUARD_POSIX(kex->client_key_recv(conn, shared_key));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_kex_client_key_send(const struct s2n_kex *kex, struct s2n_connection *conn, struct s2n_blob *shared_key)
{
- ENSURE_REF(kex);
- ENSURE_REF(kex->client_key_send);
- ENSURE_REF(conn);
- ENSURE_REF(shared_key);
+ RESULT_ENSURE_REF(kex);
+ RESULT_ENSURE_REF(kex->client_key_send);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(shared_key);
- GUARD_AS_RESULT(kex->client_key_send(conn, shared_key));
+ RESULT_GUARD_POSIX(kex->client_key_send(conn, shared_key));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_kex_tls_prf(const struct s2n_kex *kex, struct s2n_connection *conn, struct s2n_blob *premaster_secret)
{
- ENSURE_REF(kex);
- ENSURE_REF(kex->prf);
- ENSURE_REF(conn);
- ENSURE_REF(premaster_secret);
+ RESULT_ENSURE_REF(kex);
+ RESULT_ENSURE_REF(kex->prf);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(premaster_secret);
- GUARD_AS_RESULT(kex->prf(conn, premaster_secret));
+ RESULT_GUARD_POSIX(kex->prf(conn, premaster_secret));
return S2N_RESULT_OK;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_key_log.c b/contrib/restricted/aws/s2n/tls/s2n_key_log.c
new file mode 100644
index 0000000000..d8eb3dd465
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_key_log.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/**
+ * This module implements key logging as defined by the NSS Key Log Format
+ *
+ * See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
+ *
+ * This key log file is a series of lines. Comment lines begin with a sharp
+ * character ('#') and are ignored. Secrets follow the format
+ * <Label> <space> <ClientRandom> <space> <Secret> where:
+ *
+ * <Label> describes the following secret.
+ * <ClientRandom> is the 32-byte Random value from the Client Hello message, encoded as 64 hexadecimal characters.
+ * <Secret> depends on the Label (see below).
+ *
+ * The following labels are defined, followed by a description of the secret:
+ *
+ * RSA: 48 bytes for the premaster secret, encoded as 96 hexadecimal characters (removed in NSS 3.34)
+ * CLIENT_RANDOM: 48 bytes for the master secret, encoded as 96 hexadecimal characters (for SSL 3.0, TLS 1.0, 1.1 and 1.2)
+ * CLIENT_EARLY_TRAFFIC_SECRET: the hex-encoded early traffic secret for the client side (for TLS 1.3)
+ * CLIENT_HANDSHAKE_TRAFFIC_SECRET: the hex-encoded handshake traffic secret for the client side (for TLS 1.3)
+ * SERVER_HANDSHAKE_TRAFFIC_SECRET: the hex-encoded handshake traffic secret for the server side (for TLS 1.3)
+ * CLIENT_TRAFFIC_SECRET_0: the first hex-encoded application traffic secret for the client side (for TLS 1.3)
+ * SERVER_TRAFFIC_SECRET_0: the first hex-encoded application traffic secret for the server side (for TLS 1.3)
+ * EARLY_EXPORTER_SECRET: the hex-encoded early exporter secret (for TLS 1.3).
+ * EXPORTER_SECRET: the hex-encoded exporter secret (for TLS 1.3)
+ */
+
+#include "api/s2n.h"
+#include "tls/s2n_config.h"
+#include "tls/s2n_connection.h"
+#include "tls/s2n_crypto_constants.h"
+#include "tls/s2n_quic_support.h" /* this currently holds the s2n_secret_type_t enum */
+#include "utils/s2n_blob.h"
+#include "utils/s2n_safety.h"
+
+/* hex requires 2 chars per byte */
+#define HEX_ENCODING_SIZE 2
+
+S2N_RESULT s2n_key_log_hex_encode(struct s2n_stuffer *output, uint8_t *bytes, size_t len)
+{
+ RESULT_ENSURE_MUT(output);
+ RESULT_ENSURE_REF(bytes);
+
+ const uint8_t chars[] = "0123456789abcdef";
+
+ for (size_t i = 0; i < len; i++) {
+ uint8_t upper = bytes[i] >> 4;
+ uint8_t lower = bytes[i] & 0x0f;
+
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(output, chars[upper]));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(output, chars[lower]));
+ }
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_key_log_tls13_secret(struct s2n_connection *conn, const struct s2n_blob *secret, s2n_secret_type_t secret_type)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->config);
+ RESULT_ENSURE_REF(secret);
+
+ /* only emit keys if the callback has been set */
+ if (!conn->config->key_log_cb) {
+ return S2N_RESULT_OK;
+ }
+
+ const uint8_t client_early_traffic_label[] = "CLIENT_EARLY_TRAFFIC_SECRET ";
+ const uint8_t client_handshake_label[] = "CLIENT_HANDSHAKE_TRAFFIC_SECRET ";
+ const uint8_t server_handshake_label[] = "SERVER_HANDSHAKE_TRAFFIC_SECRET ";
+ const uint8_t client_traffic_label[] = "CLIENT_TRAFFIC_SECRET_0 ";
+ const uint8_t server_traffic_label[] = "SERVER_TRAFFIC_SECRET_0 ";
+
+ const uint8_t *label = NULL;
+ uint8_t label_size = 0;
+
+ switch (secret_type) {
+ case S2N_CLIENT_EARLY_TRAFFIC_SECRET:
+ label = client_early_traffic_label;
+ label_size = sizeof(client_early_traffic_label) - 1;
+ break;
+ case S2N_CLIENT_HANDSHAKE_TRAFFIC_SECRET:
+ label = client_handshake_label;
+ label_size = sizeof(client_handshake_label) - 1;
+ break;
+ case S2N_SERVER_HANDSHAKE_TRAFFIC_SECRET:
+ label = server_handshake_label;
+ label_size = sizeof(server_handshake_label) - 1;
+ break;
+ case S2N_CLIENT_APPLICATION_TRAFFIC_SECRET:
+ label = client_traffic_label;
+ label_size = sizeof(client_traffic_label) - 1;
+ break;
+ case S2N_SERVER_APPLICATION_TRAFFIC_SECRET:
+ label = server_traffic_label;
+ label_size = sizeof(server_traffic_label) - 1;
+ break;
+ default:
+ /* Ignore the secret types we don't understand */
+ return S2N_RESULT_OK;
+ }
+
+ const uint8_t len
+ = label_size
+ + S2N_TLS_RANDOM_DATA_LEN * HEX_ENCODING_SIZE
+ + 1 /* SPACE */
+ + secret->size * HEX_ENCODING_SIZE;
+
+ DEFER_CLEANUP(struct s2n_stuffer output, s2n_stuffer_free);
+ RESULT_GUARD_POSIX(s2n_stuffer_alloc(&output, len));
+
+ RESULT_GUARD_POSIX(s2n_stuffer_write_bytes(&output, label, label_size));
+ RESULT_GUARD(s2n_key_log_hex_encode(&output, conn->handshake_params.client_random, S2N_TLS_RANDOM_DATA_LEN));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(&output, ' '));
+ RESULT_GUARD(s2n_key_log_hex_encode(&output, secret->data, secret->size));
+
+ uint8_t *data = s2n_stuffer_raw_read(&output, len);
+ RESULT_ENSURE_REF(data);
+
+ conn->config->key_log_cb(conn->config->key_log_ctx, conn, data, len);
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_key_log_tls12_secret(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->config);
+
+ /* only emit keys if the callback has been set */
+ if (!conn->config->key_log_cb) {
+ return S2N_RESULT_OK;
+ }
+
+ /* CLIENT_RANDOM: 48 bytes for the master secret, encoded as 96 hexadecimal characters (for SSL 3.0, TLS 1.0, 1.1 and 1.2) */
+ const uint8_t label[] = "CLIENT_RANDOM ";
+ const uint8_t label_size = sizeof(label) - 1;
+
+ const uint8_t len
+ = label_size
+ + S2N_TLS_RANDOM_DATA_LEN * HEX_ENCODING_SIZE
+ + 1 /* SPACE */
+ + S2N_TLS_SECRET_LEN * HEX_ENCODING_SIZE;
+
+ DEFER_CLEANUP(struct s2n_stuffer output, s2n_stuffer_free);
+ RESULT_GUARD_POSIX(s2n_stuffer_alloc(&output, len));
+
+ RESULT_GUARD_POSIX(s2n_stuffer_write_bytes(&output, label, label_size));
+ RESULT_GUARD(s2n_key_log_hex_encode(&output, conn->handshake_params.client_random, S2N_TLS_RANDOM_DATA_LEN));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(&output, ' '));
+ RESULT_GUARD(s2n_key_log_hex_encode(&output, conn->secrets.tls12.master_secret, S2N_TLS_SECRET_LEN));
+
+ uint8_t *data = s2n_stuffer_raw_read(&output, len);
+ RESULT_ENSURE_REF(data);
+
+ conn->config->key_log_cb(conn->config->key_log_ctx, conn, data, len);
+
+ return S2N_RESULT_OK;
+}
+
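The comment at the top of the new s2n_key_log.c above describes the NSS key log line format that s2n_key_log_tls12_secret() and s2n_key_log_tls13_secret() emit through config->key_log_cb. A minimal sketch of a consumer callback follows; the (ctx, conn, line, len) parameter shape is inferred from the call sites above, and the registration helper s2n_config_set_key_log_cb mentioned in the usage comment is an assumption that may differ between s2n releases.

    #include <stdint.h>
    #include <stdio.h>

    #include <s2n.h>

    /* Hypothetical key-log sink: writes each NSS-format line to a FILE*.
     * `line` is not NUL-terminated, so exactly `len` bytes are written,
     * followed by a newline so tools such as Wireshark can parse the file. */
    static int keylog_to_file(void *ctx, struct s2n_connection *conn,
                              uint8_t *line, size_t len)
    {
        (void) conn;
        FILE *out = (FILE *) ctx;
        fwrite(line, 1, len, out);
        fputc('\n', out);
        fflush(out);
        return 0;
    }

    /* Usage sketch (assumed API, check the s2n.h shipped with your version):
     *
     *     FILE *keys = fopen("sslkeys.log", "a");
     *     s2n_config_set_key_log_cb(config, keylog_to_file, keys);
     *
     * A TLS 1.2 entry produced by s2n_key_log_tls12_secret() then looks like:
     *
     *     CLIENT_RANDOM <64 hex chars of client_random> <96 hex chars of master_secret>
     */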
diff --git a/contrib/restricted/aws/s2n/tls/s2n_key_log.h b/contrib/restricted/aws/s2n/tls/s2n_key_log.h
new file mode 100644
index 0000000000..d804d21c4c
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_key_log.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "api/s2n.h"
+#include "stuffer/s2n_stuffer.h"
+#include "tls/s2n_quic_support.h"
+#include "utils/s2n_blob.h"
+#include "utils/s2n_safety.h"
+
+S2N_RESULT s2n_key_log_hex_encode(struct s2n_stuffer *output, uint8_t *bytes, size_t len);
+S2N_RESULT s2n_key_log_tls12_secret(struct s2n_connection *conn);
+S2N_RESULT s2n_key_log_tls13_secret(struct s2n_connection *conn, const struct s2n_blob *secret, s2n_secret_type_t secret_type);
+
diff --git a/contrib/restricted/aws/s2n/tls/s2n_key_update.c b/contrib/restricted/aws/s2n/tls/s2n_key_update.c
index 6f63e02b38..86e933a1ac 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_key_update.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_key_update.c
@@ -19,6 +19,7 @@
#include "tls/s2n_key_update.h"
#include "tls/s2n_tls13_handshake.h"
#include "tls/s2n_record.h"
+#include "tls/s2n_tls.h"
#include "crypto/s2n_sequence.h"
@@ -30,52 +31,55 @@ int s2n_check_record_limit(struct s2n_connection *conn, struct s2n_blob *sequenc
int s2n_key_update_recv(struct s2n_connection *conn, struct s2n_stuffer *request)
{
- notnull_check(conn);
- ENSURE_POSIX(!conn->config->quic_enabled, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(!s2n_connection_is_quic_enabled(conn), S2N_ERR_BAD_MESSAGE);
uint8_t key_update_request;
- GUARD(s2n_stuffer_read_uint8(request, &key_update_request));
+ POSIX_GUARD(s2n_stuffer_read_uint8(request, &key_update_request));
S2N_ERROR_IF(key_update_request != S2N_KEY_UPDATE_NOT_REQUESTED && key_update_request != S2N_KEY_UPDATE_REQUESTED,
S2N_ERR_BAD_MESSAGE);
conn->key_update_pending = key_update_request;
/* Update peer's key since a key_update was received */
if (conn->mode == S2N_CLIENT){
- GUARD(s2n_update_application_traffic_keys(conn, S2N_SERVER, RECEIVING));
+ POSIX_GUARD(s2n_update_application_traffic_keys(conn, S2N_SERVER, RECEIVING));
} else {
- GUARD(s2n_update_application_traffic_keys(conn, S2N_CLIENT, RECEIVING));
+ POSIX_GUARD(s2n_update_application_traffic_keys(conn, S2N_CLIENT, RECEIVING));
}
return S2N_SUCCESS;
}
-int s2n_key_update_send(struct s2n_connection *conn)
+int s2n_key_update_send(struct s2n_connection *conn, s2n_blocked_status *blocked)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
struct s2n_blob sequence_number = {0};
if (conn->mode == S2N_CLIENT) {
- GUARD(s2n_blob_init(&sequence_number, conn->secure.client_sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_blob_init(&sequence_number, conn->secure.client_sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
} else {
- GUARD(s2n_blob_init(&sequence_number, conn->secure.server_sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_blob_init(&sequence_number, conn->secure.server_sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
}
- GUARD(s2n_check_record_limit(conn, &sequence_number));
+ POSIX_GUARD(s2n_check_record_limit(conn, &sequence_number));
if (conn->key_update_pending) {
uint8_t key_update_data[S2N_KEY_UPDATE_MESSAGE_SIZE];
struct s2n_blob key_update_blob = {0};
- GUARD(s2n_blob_init(&key_update_blob, key_update_data, sizeof(key_update_data)));
+ POSIX_GUARD(s2n_blob_init(&key_update_blob, key_update_data, sizeof(key_update_data)));
/* Write key update message */
- GUARD(s2n_key_update_write(&key_update_blob));
+ POSIX_GUARD(s2n_key_update_write(&key_update_blob));
/* Encrypt the message */
- GUARD(s2n_record_write(conn, TLS_HANDSHAKE, &key_update_blob));
+ POSIX_GUARD(s2n_record_write(conn, TLS_HANDSHAKE, &key_update_blob));
/* Update encryption key */
- GUARD(s2n_update_application_traffic_keys(conn, conn->mode, SENDING));
+ POSIX_GUARD(s2n_update_application_traffic_keys(conn, conn->mode, SENDING));
conn->key_update_pending = false;
+
+ POSIX_GUARD(s2n_flush(conn, blocked));
}
return S2N_SUCCESS;
@@ -83,28 +87,28 @@ int s2n_key_update_send(struct s2n_connection *conn)
int s2n_key_update_write(struct s2n_blob *out)
{
- notnull_check(out);
+ POSIX_ENSURE_REF(out);
struct s2n_stuffer key_update_stuffer = {0};
- GUARD(s2n_stuffer_init(&key_update_stuffer, out));
- GUARD(s2n_stuffer_write_uint8(&key_update_stuffer, TLS_KEY_UPDATE));
- GUARD(s2n_stuffer_write_uint24(&key_update_stuffer, S2N_KEY_UPDATE_LENGTH));
+ POSIX_GUARD(s2n_stuffer_init(&key_update_stuffer, out));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&key_update_stuffer, TLS_KEY_UPDATE));
+ POSIX_GUARD(s2n_stuffer_write_uint24(&key_update_stuffer, S2N_KEY_UPDATE_LENGTH));
/* s2n currently does not require peers to update their encryption keys. */
- GUARD(s2n_stuffer_write_uint8(&key_update_stuffer, S2N_KEY_UPDATE_NOT_REQUESTED));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&key_update_stuffer, S2N_KEY_UPDATE_NOT_REQUESTED));
return S2N_SUCCESS;
}
int s2n_check_record_limit(struct s2n_connection *conn, struct s2n_blob *sequence_number)
{
- notnull_check(conn);
- notnull_check(sequence_number);
- notnull_check(conn->secure.cipher_suite);
- notnull_check(conn->secure.cipher_suite->record_alg);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(sequence_number);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite->record_alg);
uint64_t output = 0;
- GUARD(s2n_sequence_number_to_uint64(sequence_number, &output));
+ POSIX_GUARD(s2n_sequence_number_to_uint64(sequence_number, &output));
if (output + 1 > conn->secure.cipher_suite->record_alg->encryption_limit) {
conn->key_update_pending = true;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_key_update.h b/contrib/restricted/aws/s2n/tls/s2n_key_update.h
index 20ac48411c..859cd11506 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_key_update.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_key_update.h
@@ -31,4 +31,4 @@ typedef enum {
} keyupdate_request;
int s2n_key_update_recv(struct s2n_connection *conn, struct s2n_stuffer *request);
-int s2n_key_update_send(struct s2n_connection *conn);
+int s2n_key_update_send(struct s2n_connection *conn, s2n_blocked_status *blocked);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_ocsp_stapling.c b/contrib/restricted/aws/s2n/tls/s2n_ocsp_stapling.c
index 7ad96a4d31..441fdff27e 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_ocsp_stapling.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_ocsp_stapling.c
@@ -28,7 +28,7 @@
int s2n_server_status_send(struct s2n_connection *conn)
{
if (s2n_server_can_send_ocsp(conn)) {
- GUARD(s2n_server_certificate_status_send(conn, &conn->handshake.io));
+ POSIX_GUARD(s2n_server_certificate_status_send(conn, &conn->handshake.io));
}
return 0;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_post_handshake.c b/contrib/restricted/aws/s2n/tls/s2n_post_handshake.c
index 34594576db..7801920563 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_post_handshake.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_post_handshake.c
@@ -20,38 +20,56 @@
#include "tls/s2n_tls.h"
#include "utils/s2n_safety.h"
-/* TLS 1.3 introducted several post handshake messages. This function currently only
- * supports parsing for the KeyUpdate message. Once the other post-handshake messages
- * have been implemented, this function can be altered to include the other messages.
- */
int s2n_post_handshake_recv(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
uint8_t post_handshake_id;
uint32_t message_length;
- S2N_ERROR_IF(conn->actual_protocol_version != S2N_TLS13, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_stuffer_read_uint8(&conn->in, &post_handshake_id));
- GUARD(s2n_stuffer_read_uint24(&conn->in, &message_length));
+ while(s2n_stuffer_data_available(&conn->in)) {
+ POSIX_GUARD(s2n_stuffer_read_uint8(&conn->in, &post_handshake_id));
+ POSIX_GUARD(s2n_stuffer_read_uint24(&conn->in, &message_length));
- struct s2n_blob post_handshake_blob = {0};
- uint8_t *message_data = s2n_stuffer_raw_read(&conn->in, message_length);
- notnull_check(message_data);
- GUARD(s2n_blob_init(&post_handshake_blob, message_data, message_length));
+ struct s2n_blob post_handshake_blob = { 0 };
+ uint8_t *message_data = s2n_stuffer_raw_read(&conn->in, message_length);
+ POSIX_ENSURE_REF(message_data);
+ POSIX_GUARD(s2n_blob_init(&post_handshake_blob, message_data, message_length));
- struct s2n_stuffer post_handshake_stuffer = {0};
- GUARD(s2n_stuffer_init(&post_handshake_stuffer, &post_handshake_blob));
- GUARD(s2n_stuffer_skip_write(&post_handshake_stuffer, message_length));
+ struct s2n_stuffer post_handshake_stuffer = { 0 };
+ POSIX_GUARD(s2n_stuffer_init(&post_handshake_stuffer, &post_handshake_blob));
+ POSIX_GUARD(s2n_stuffer_skip_write(&post_handshake_stuffer, message_length));
- switch (post_handshake_id)
- {
- case TLS_KEY_UPDATE:
- GUARD(s2n_key_update_recv(conn, &post_handshake_stuffer));
- break;
- default:
- /* Ignore all other messages */
- break;
+ switch (post_handshake_id)
+ {
+ case TLS_KEY_UPDATE:
+ POSIX_GUARD(s2n_key_update_recv(conn, &post_handshake_stuffer));
+ break;
+ case TLS_SERVER_NEW_SESSION_TICKET:
+ POSIX_GUARD_RESULT(s2n_tls13_server_nst_recv(conn, &post_handshake_stuffer));
+ break;
+ case TLS_HELLO_REQUEST:
+ POSIX_GUARD(s2n_client_hello_request_recv(conn));
+ break;
+ case TLS_CLIENT_HELLO:
+ case TLS_SERVER_HELLO:
+ case TLS_END_OF_EARLY_DATA:
+ case TLS_ENCRYPTED_EXTENSIONS:
+ case TLS_CERTIFICATE:
+ case TLS_SERVER_KEY:
+ case TLS_CERT_REQ:
+ case TLS_SERVER_HELLO_DONE:
+ case TLS_CERT_VERIFY:
+ case TLS_CLIENT_KEY:
+ case TLS_FINISHED:
+ case TLS_SERVER_CERT_STATUS:
+ /* All other known handshake messages should be rejected */
+ POSIX_BAIL(S2N_ERR_BAD_MESSAGE);
+ break;
+ default:
+ /* Ignore all other messages */
+ break;
+ }
}
return S2N_SUCCESS;
@@ -59,11 +77,10 @@ int s2n_post_handshake_recv(struct s2n_connection *conn)
int s2n_post_handshake_send(struct s2n_connection *conn, s2n_blocked_status *blocked)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
- GUARD(s2n_key_update_send(conn));
- GUARD(s2n_flush(conn, blocked));
- GUARD(s2n_stuffer_rewrite(&conn->out));
+ POSIX_GUARD(s2n_key_update_send(conn, blocked));
+ POSIX_GUARD_RESULT(s2n_tls13_server_nst_send(conn, blocked));
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_prf.c b/contrib/restricted/aws/s2n/tls/s2n_prf.c
index fbfffb5fad..1096eb6c82 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_prf.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_prf.c
@@ -14,6 +14,7 @@
*/
#include <sys/param.h>
+#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include <string.h>
@@ -23,6 +24,7 @@
#include "tls/s2n_cipher_suites.h"
#include "tls/s2n_connection.h"
#include "tls/s2n_prf.h"
+#include "tls/s2n_tls.h"
#include "stuffer/s2n_stuffer.h"
@@ -35,44 +37,51 @@
#include "utils/s2n_blob.h"
#include "utils/s2n_mem.h"
-static int s2n_sslv3_prf(struct s2n_prf_working_space *ws, struct s2n_blob *secret, struct s2n_blob *seed_a,
+static int s2n_sslv3_prf(struct s2n_connection *conn, struct s2n_blob *secret, struct s2n_blob *seed_a,
struct s2n_blob *seed_b, struct s2n_blob *seed_c, struct s2n_blob *out)
{
- struct s2n_hash_state *md5 = &ws->ssl3.md5;
- struct s2n_hash_state *sha1 = &ws->ssl3.sha1;
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+ struct s2n_hash_state *workspace = &conn->handshake.hashes->hash_workspace;
uint32_t outputlen = out->size;
uint8_t *output = out->data;
uint8_t iteration = 1;
+ uint8_t md5_digest[MD5_DIGEST_LENGTH] = { 0 }, sha_digest[SHA_DIGEST_LENGTH] = { 0 };
+
uint8_t A = 'A';
while (outputlen) {
- GUARD(s2n_hash_reset(sha1));
+ struct s2n_hash_state *sha1 = workspace;
+ POSIX_GUARD(s2n_hash_reset(sha1));
+ POSIX_GUARD(s2n_hash_init(sha1, S2N_HASH_SHA1));
for (int i = 0; i < iteration; i++) {
- GUARD(s2n_hash_update(sha1, &A, 1));
+ POSIX_GUARD(s2n_hash_update(sha1, &A, 1));
}
- GUARD(s2n_hash_update(sha1, secret->data, secret->size));
- GUARD(s2n_hash_update(sha1, seed_a->data, seed_a->size));
+ POSIX_GUARD(s2n_hash_update(sha1, secret->data, secret->size));
+ POSIX_GUARD(s2n_hash_update(sha1, seed_a->data, seed_a->size));
if (seed_b) {
- GUARD(s2n_hash_update(sha1, seed_b->data, seed_b->size));
+ POSIX_GUARD(s2n_hash_update(sha1, seed_b->data, seed_b->size));
if (seed_c) {
- GUARD(s2n_hash_update(sha1, seed_c->data, seed_c->size));
+ POSIX_GUARD(s2n_hash_update(sha1, seed_c->data, seed_c->size));
}
}
- GUARD(s2n_hash_digest(sha1, ws->ssl3.sha1_digest, sizeof(ws->ssl3.sha1_digest)));
+ POSIX_GUARD(s2n_hash_digest(sha1, sha_digest, sizeof(sha_digest)));
- GUARD(s2n_hash_reset(md5));
- GUARD(s2n_hash_update(md5, secret->data, secret->size));
- GUARD(s2n_hash_update(md5, ws->ssl3.sha1_digest, sizeof(ws->ssl3.sha1_digest)));
- GUARD(s2n_hash_digest(md5, ws->ssl3.md5_digest, sizeof(ws->ssl3.md5_digest)));
+ struct s2n_hash_state *md5 = workspace;
+ POSIX_GUARD(s2n_hash_reset(md5));
+ POSIX_GUARD(s2n_hash_init(md5, S2N_HASH_MD5));
+ POSIX_GUARD(s2n_hash_update(md5, secret->data, secret->size));
+ POSIX_GUARD(s2n_hash_update(md5, sha_digest, sizeof(sha_digest)));
+ POSIX_GUARD(s2n_hash_digest(md5, md5_digest, sizeof(md5_digest)));
- uint32_t bytes_to_copy = MIN(outputlen, sizeof(ws->ssl3.md5_digest));
+ uint32_t bytes_to_copy = MIN(outputlen, sizeof(md5_digest));
- memcpy_check(output, ws->ssl3.md5_digest, bytes_to_copy);
+ POSIX_CHECKED_MEMCPY(output, md5_digest, bytes_to_copy);
outputlen -= bytes_to_copy;
output += bytes_to_copy;
@@ -82,129 +91,201 @@ static int s2n_sslv3_prf(struct s2n_prf_working_space *ws, struct s2n_blob *secr
iteration++;
}
- GUARD(s2n_hash_reset(md5));
- GUARD(s2n_hash_reset(sha1));
-
return 0;
}
+static int s2n_init_md_from_hmac_alg(struct s2n_prf_working_space *ws, s2n_hmac_algorithm alg){
+ switch (alg) {
+ case S2N_HMAC_SSLv3_MD5:
+ case S2N_HMAC_MD5:
+ ws->p_hash.evp_hmac.evp_digest.md = EVP_md5();
+ break;
+ case S2N_HMAC_SSLv3_SHA1:
+ case S2N_HMAC_SHA1:
+ ws->p_hash.evp_hmac.evp_digest.md = EVP_sha1();
+ break;
+ case S2N_HMAC_SHA224:
+ ws->p_hash.evp_hmac.evp_digest.md = EVP_sha224();
+ break;
+ case S2N_HMAC_SHA256:
+ ws->p_hash.evp_hmac.evp_digest.md = EVP_sha256();
+ break;
+ case S2N_HMAC_SHA384:
+ ws->p_hash.evp_hmac.evp_digest.md = EVP_sha384();
+ break;
+ case S2N_HMAC_SHA512:
+ ws->p_hash.evp_hmac.evp_digest.md = EVP_sha512();
+ break;
+ default:
+ POSIX_BAIL(S2N_ERR_P_HASH_INVALID_ALGORITHM);
+ }
+ return S2N_SUCCESS;
+}
+
#if !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_IS_AWSLC)
-static int s2n_evp_hmac_p_hash_new(struct s2n_prf_working_space *ws)
+static int s2n_evp_pkey_p_hash_alloc(struct s2n_prf_working_space *ws)
{
- notnull_check(ws->tls.p_hash.evp_hmac.evp_digest.ctx = S2N_EVP_MD_CTX_NEW());
+ POSIX_ENSURE_REF(ws->p_hash.evp_hmac.evp_digest.ctx = S2N_EVP_MD_CTX_NEW());
return 0;
}
-static int s2n_evp_hmac_p_hash_digest_init(struct s2n_prf_working_space *ws)
+static int s2n_evp_pkey_p_hash_digest_init(struct s2n_prf_working_space *ws)
{
- notnull_check(ws->tls.p_hash.evp_hmac.evp_digest.md);
- notnull_check(ws->tls.p_hash.evp_hmac.evp_digest.ctx);
- notnull_check(ws->tls.p_hash.evp_hmac.mac_key);
+ POSIX_ENSURE_REF(ws->p_hash.evp_hmac.evp_digest.md);
+ POSIX_ENSURE_REF(ws->p_hash.evp_hmac.evp_digest.ctx);
+ POSIX_ENSURE_REF(ws->p_hash.evp_hmac.ctx.evp_pkey);
/* Ignore the MD5 check when in FIPS mode to comply with the TLS 1.0 RFC */
if (s2n_is_in_fips_mode()) {
- GUARD(s2n_digest_allow_md5_for_fips(&ws->tls.p_hash.evp_hmac.evp_digest));
+ POSIX_GUARD(s2n_digest_allow_md5_for_fips(&ws->p_hash.evp_hmac.evp_digest));
}
- GUARD_OSSL(EVP_DigestSignInit(ws->tls.p_hash.evp_hmac.evp_digest.ctx, NULL, ws->tls.p_hash.evp_hmac.evp_digest.md, NULL, ws->tls.p_hash.evp_hmac.mac_key),
+ POSIX_GUARD_OSSL(EVP_DigestSignInit(ws->p_hash.evp_hmac.evp_digest.ctx, NULL, ws->p_hash.evp_hmac.evp_digest.md, NULL, ws->p_hash.evp_hmac.ctx.evp_pkey),
S2N_ERR_P_HASH_INIT_FAILED);
return 0;
}
-static int s2n_evp_hmac_p_hash_init(struct s2n_prf_working_space *ws, s2n_hmac_algorithm alg, struct s2n_blob *secret)
+static int s2n_evp_pkey_p_hash_init(struct s2n_prf_working_space *ws, s2n_hmac_algorithm alg, struct s2n_blob *secret)
{
/* Initialize the message digest */
- switch (alg) {
- case S2N_HMAC_SSLv3_MD5:
- case S2N_HMAC_MD5:
- ws->tls.p_hash.evp_hmac.evp_digest.md = EVP_md5();
- break;
- case S2N_HMAC_SSLv3_SHA1:
- case S2N_HMAC_SHA1:
- ws->tls.p_hash.evp_hmac.evp_digest.md = EVP_sha1();
- break;
- case S2N_HMAC_SHA224:
- ws->tls.p_hash.evp_hmac.evp_digest.md = EVP_sha224();
- break;
- case S2N_HMAC_SHA256:
- ws->tls.p_hash.evp_hmac.evp_digest.md = EVP_sha256();
- break;
- case S2N_HMAC_SHA384:
- ws->tls.p_hash.evp_hmac.evp_digest.md = EVP_sha384();
- break;
- case S2N_HMAC_SHA512:
- ws->tls.p_hash.evp_hmac.evp_digest.md = EVP_sha512();
- break;
- default:
- S2N_ERROR(S2N_ERR_P_HASH_INVALID_ALGORITHM);
- }
+ POSIX_GUARD(s2n_init_md_from_hmac_alg(ws, alg));
/* Initialize the mac key using the provided secret */
- notnull_check(ws->tls.p_hash.evp_hmac.mac_key = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, secret->data, secret->size));
+ POSIX_ENSURE_REF(ws->p_hash.evp_hmac.ctx.evp_pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, secret->data, secret->size));
/* Initialize the message digest context with the above message digest and mac key */
- return s2n_evp_hmac_p_hash_digest_init(ws);
+ return s2n_evp_pkey_p_hash_digest_init(ws);
}
-static int s2n_evp_hmac_p_hash_update(struct s2n_prf_working_space *ws, const void *data, uint32_t size)
+static int s2n_evp_pkey_p_hash_update(struct s2n_prf_working_space *ws, const void *data, uint32_t size)
{
- GUARD_OSSL(EVP_DigestSignUpdate(ws->tls.p_hash.evp_hmac.evp_digest.ctx, data, (size_t)size), S2N_ERR_P_HASH_UPDATE_FAILED);
+ POSIX_GUARD_OSSL(EVP_DigestSignUpdate(ws->p_hash.evp_hmac.evp_digest.ctx, data, (size_t)size), S2N_ERR_P_HASH_UPDATE_FAILED);
return 0;
}
-static int s2n_evp_hmac_p_hash_digest(struct s2n_prf_working_space *ws, void *digest, uint32_t size)
+static int s2n_evp_pkey_p_hash_final(struct s2n_prf_working_space *ws, void *digest, uint32_t size)
{
 /* The EVP_DigestSign APIs require size_t data structures */
size_t digest_size = size;
- GUARD_OSSL(EVP_DigestSignFinal(ws->tls.p_hash.evp_hmac.evp_digest.ctx, (unsigned char *)digest, &digest_size), S2N_ERR_P_HASH_FINAL_FAILED);
+ POSIX_GUARD_OSSL(EVP_DigestSignFinal(ws->p_hash.evp_hmac.evp_digest.ctx, (unsigned char *)digest, &digest_size), S2N_ERR_P_HASH_FINAL_FAILED);
return 0;
}
-static int s2n_evp_hmac_p_hash_wipe(struct s2n_prf_working_space *ws)
+static int s2n_evp_pkey_p_hash_wipe(struct s2n_prf_working_space *ws)
{
- GUARD_OSSL(S2N_EVP_MD_CTX_RESET(ws->tls.p_hash.evp_hmac.evp_digest.ctx), S2N_ERR_P_HASH_WIPE_FAILED);
+ POSIX_GUARD_OSSL(S2N_EVP_MD_CTX_RESET(ws->p_hash.evp_hmac.evp_digest.ctx), S2N_ERR_P_HASH_WIPE_FAILED);
return 0;
}
-static int s2n_evp_hmac_p_hash_reset(struct s2n_prf_working_space *ws)
+static int s2n_evp_pkey_p_hash_reset(struct s2n_prf_working_space *ws)
{
- GUARD(s2n_evp_hmac_p_hash_wipe(ws));
+ POSIX_GUARD(s2n_evp_pkey_p_hash_wipe(ws));
- return s2n_evp_hmac_p_hash_digest_init(ws);
+ /*
+ * On some cleanup paths s2n_evp_pkey_p_hash_reset can be called before s2n_evp_pkey_p_hash_init so there is nothing
+ * to reset.
+ */
+ if (ws->p_hash.evp_hmac.ctx.evp_pkey == NULL) {
+ return S2N_SUCCESS;
+ }
+ return s2n_evp_pkey_p_hash_digest_init(ws);
}
-static int s2n_evp_hmac_p_hash_cleanup(struct s2n_prf_working_space *ws)
+static int s2n_evp_pkey_p_hash_cleanup(struct s2n_prf_working_space *ws)
{
/* Prepare the workspace md_ctx for the next p_hash */
- GUARD(s2n_evp_hmac_p_hash_wipe(ws));
+ POSIX_GUARD(s2n_evp_pkey_p_hash_wipe(ws));
/* Free mac key - PKEYs cannot be reused */
- notnull_check(ws->tls.p_hash.evp_hmac.mac_key);
- EVP_PKEY_free(ws->tls.p_hash.evp_hmac.mac_key);
- ws->tls.p_hash.evp_hmac.mac_key = NULL;
+ POSIX_ENSURE_REF(ws->p_hash.evp_hmac.ctx.evp_pkey);
+ EVP_PKEY_free(ws->p_hash.evp_hmac.ctx.evp_pkey);
+ ws->p_hash.evp_hmac.ctx.evp_pkey = NULL;
return 0;
}
-static int s2n_evp_hmac_p_hash_free(struct s2n_prf_working_space *ws)
+static int s2n_evp_pkey_p_hash_free(struct s2n_prf_working_space *ws)
{
- notnull_check(ws->tls.p_hash.evp_hmac.evp_digest.ctx);
- S2N_EVP_MD_CTX_FREE(ws->tls.p_hash.evp_hmac.evp_digest.ctx);
- ws->tls.p_hash.evp_hmac.evp_digest.ctx = NULL;
+ POSIX_ENSURE_REF(ws->p_hash.evp_hmac.evp_digest.ctx);
+ S2N_EVP_MD_CTX_FREE(ws->p_hash.evp_hmac.evp_digest.ctx);
+ ws->p_hash.evp_hmac.evp_digest.ctx = NULL;
return 0;
}
-static const struct s2n_p_hash_hmac s2n_evp_hmac = {
- .alloc = &s2n_evp_hmac_p_hash_new,
+static const struct s2n_p_hash_hmac s2n_evp_pkey_p_hash_hmac = {
+ .alloc = &s2n_evp_pkey_p_hash_alloc,
+ .init = &s2n_evp_pkey_p_hash_init,
+ .update = &s2n_evp_pkey_p_hash_update,
+ .final = &s2n_evp_pkey_p_hash_final,
+ .reset = &s2n_evp_pkey_p_hash_reset,
+ .cleanup = &s2n_evp_pkey_p_hash_cleanup,
+ .free = &s2n_evp_pkey_p_hash_free,
+};
+#else
+static int s2n_evp_hmac_p_hash_alloc(struct s2n_prf_working_space *ws)
+{
+ POSIX_ENSURE_REF(ws->p_hash.evp_hmac.ctx.hmac_ctx = HMAC_CTX_new());
+ return S2N_SUCCESS;
+}
+
+static int s2n_evp_hmac_p_hash_init(struct s2n_prf_working_space *ws, s2n_hmac_algorithm alg, struct s2n_blob *secret)
+{
+ /* Figure out the correct EVP_MD from s2n_hmac_algorithm */
+ POSIX_GUARD(s2n_init_md_from_hmac_alg(ws, alg));
+
+ /* Initialize the mac and digest */
+ POSIX_GUARD_OSSL(HMAC_Init_ex(ws->p_hash.evp_hmac.ctx.hmac_ctx, secret->data, secret->size, ws->p_hash.evp_hmac.evp_digest.md, NULL), S2N_ERR_P_HASH_INIT_FAILED);
+ return S2N_SUCCESS;
+}
+
+static int s2n_evp_hmac_p_hash_update(struct s2n_prf_working_space *ws, const void *data, uint32_t size)
+{
+ POSIX_GUARD_OSSL(HMAC_Update(ws->p_hash.evp_hmac.ctx.hmac_ctx, data, (size_t)size), S2N_ERR_P_HASH_UPDATE_FAILED);
+ return S2N_SUCCESS;
+}
+
+static int s2n_evp_hmac_p_hash_final(struct s2n_prf_working_space *ws, void *digest, uint32_t size)
+{
+ /* HMAC_Final requires an unsigned int size parameter */
+ unsigned int digest_size = size;
+ POSIX_GUARD_OSSL(HMAC_Final(ws->p_hash.evp_hmac.ctx.hmac_ctx, (unsigned char *)digest, &digest_size), S2N_ERR_P_HASH_FINAL_FAILED);
+ return S2N_SUCCESS;
+}
+
+static int s2n_evp_hmac_p_hash_reset(struct s2n_prf_working_space *ws)
+{
+ POSIX_ENSURE_REF(ws);
+ if (ws->p_hash.evp_hmac.evp_digest.md == NULL) {
+ return S2N_SUCCESS;
+ }
+ POSIX_GUARD_OSSL(HMAC_Init_ex(ws->p_hash.evp_hmac.ctx.hmac_ctx, NULL, 0, ws->p_hash.evp_hmac.evp_digest.md, NULL), S2N_ERR_P_HASH_INIT_FAILED);
+ return S2N_SUCCESS;
+}
+
+static int s2n_evp_hmac_p_hash_cleanup(struct s2n_prf_working_space *ws)
+{
+ /* Prepare the workspace md_ctx for the next p_hash */
+ HMAC_CTX_reset(ws->p_hash.evp_hmac.ctx.hmac_ctx);
+ return S2N_SUCCESS;
+}
+
+static int s2n_evp_hmac_p_hash_free(struct s2n_prf_working_space *ws)
+{
+ HMAC_CTX_free(ws->p_hash.evp_hmac.ctx.hmac_ctx);
+ return S2N_SUCCESS;
+}
+
+static const struct s2n_p_hash_hmac s2n_evp_hmac_p_hash_hmac = {
+ .alloc = &s2n_evp_hmac_p_hash_alloc,
.init = &s2n_evp_hmac_p_hash_init,
.update = &s2n_evp_hmac_p_hash_update,
- .final = &s2n_evp_hmac_p_hash_digest,
+ .final = &s2n_evp_hmac_p_hash_final,
.reset = &s2n_evp_hmac_p_hash_reset,
.cleanup = &s2n_evp_hmac_p_hash_cleanup,
.free = &s2n_evp_hmac_p_hash_free,
@@ -213,29 +294,35 @@ static const struct s2n_p_hash_hmac s2n_evp_hmac = {
static int s2n_hmac_p_hash_new(struct s2n_prf_working_space *ws)
{
- GUARD(s2n_hmac_new(&ws->tls.p_hash.s2n_hmac));
+ POSIX_GUARD(s2n_hmac_new(&ws->p_hash.s2n_hmac));
- return s2n_hmac_init(&ws->tls.p_hash.s2n_hmac, S2N_HMAC_NONE, NULL, 0);
+ return s2n_hmac_init(&ws->p_hash.s2n_hmac, S2N_HMAC_NONE, NULL, 0);
}
static int s2n_hmac_p_hash_init(struct s2n_prf_working_space *ws, s2n_hmac_algorithm alg, struct s2n_blob *secret)
{
- return s2n_hmac_init(&ws->tls.p_hash.s2n_hmac, alg, secret->data, secret->size);
+ return s2n_hmac_init(&ws->p_hash.s2n_hmac, alg, secret->data, secret->size);
}
static int s2n_hmac_p_hash_update(struct s2n_prf_working_space *ws, const void *data, uint32_t size)
{
- return s2n_hmac_update(&ws->tls.p_hash.s2n_hmac, data, size);
+ return s2n_hmac_update(&ws->p_hash.s2n_hmac, data, size);
}
static int s2n_hmac_p_hash_digest(struct s2n_prf_working_space *ws, void *digest, uint32_t size)
{
- return s2n_hmac_digest(&ws->tls.p_hash.s2n_hmac, digest, size);
+ return s2n_hmac_digest(&ws->p_hash.s2n_hmac, digest, size);
}
static int s2n_hmac_p_hash_reset(struct s2n_prf_working_space *ws)
{
- return s2n_hmac_reset(&ws->tls.p_hash.s2n_hmac);
+ /* If we actually initialized s2n_hmac, wipe it.
+ * A valid, initialized s2n_hmac_state will have a valid block size.
+ */
+ if (ws->p_hash.s2n_hmac.hash_block_size != 0) {
+ return s2n_hmac_reset(&ws->p_hash.s2n_hmac);
+ }
+ return S2N_SUCCESS;
}
static int s2n_hmac_p_hash_cleanup(struct s2n_prf_working_space *ws)
@@ -245,10 +332,10 @@ static int s2n_hmac_p_hash_cleanup(struct s2n_prf_working_space *ws)
static int s2n_hmac_p_hash_free(struct s2n_prf_working_space *ws)
{
- return s2n_hmac_free(&ws->tls.p_hash.s2n_hmac);
+ return s2n_hmac_free(&ws->p_hash.s2n_hmac);
}
-static const struct s2n_p_hash_hmac s2n_hmac = {
+static const struct s2n_p_hash_hmac s2n_internal_p_hash_hmac = {
.alloc = &s2n_hmac_p_hash_new,
.init = &s2n_hmac_p_hash_init,
.update = &s2n_hmac_p_hash_update,
@@ -258,103 +345,129 @@ static const struct s2n_p_hash_hmac s2n_hmac = {
.free = &s2n_hmac_p_hash_free,
};
+const struct s2n_p_hash_hmac *s2n_get_hmac_implementation() {
+#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
+ return s2n_is_in_fips_mode() ? &s2n_evp_hmac_p_hash_hmac : &s2n_internal_p_hash_hmac;
+#else
+ return s2n_is_in_fips_mode() ? &s2n_evp_pkey_p_hash_hmac : &s2n_internal_p_hash_hmac;
+#endif
+}
+
static int s2n_p_hash(struct s2n_prf_working_space *ws, s2n_hmac_algorithm alg, struct s2n_blob *secret, struct s2n_blob *label,
struct s2n_blob *seed_a, struct s2n_blob *seed_b, struct s2n_blob *seed_c, struct s2n_blob *out)
{
uint8_t digest_size;
- GUARD(s2n_hmac_digest_size(alg, &digest_size));
+ POSIX_GUARD(s2n_hmac_digest_size(alg, &digest_size));
- const struct s2n_p_hash_hmac *hmac = ws->tls.p_hash_hmac_impl;
+ const struct s2n_p_hash_hmac *hmac = s2n_get_hmac_implementation();
/* First compute hmac(secret + A(0)) */
- GUARD(hmac->init(ws, alg, secret));
- GUARD(hmac->update(ws, label->data, label->size));
- GUARD(hmac->update(ws, seed_a->data, seed_a->size));
+ POSIX_GUARD(hmac->init(ws, alg, secret));
+ POSIX_GUARD(hmac->update(ws, label->data, label->size));
+ POSIX_GUARD(hmac->update(ws, seed_a->data, seed_a->size));
if (seed_b) {
- GUARD(hmac->update(ws, seed_b->data, seed_b->size));
+ POSIX_GUARD(hmac->update(ws, seed_b->data, seed_b->size));
if (seed_c) {
- GUARD(hmac->update(ws, seed_c->data, seed_c->size));
+ POSIX_GUARD(hmac->update(ws, seed_c->data, seed_c->size));
}
}
- GUARD(hmac->final(ws, ws->tls.digest0, digest_size));
+ POSIX_GUARD(hmac->final(ws, ws->digest0, digest_size));
uint32_t outputlen = out->size;
uint8_t *output = out->data;
while (outputlen) {
/* Now compute hmac(secret + A(N - 1) + seed) */
- GUARD(hmac->reset(ws));
- GUARD(hmac->update(ws, ws->tls.digest0, digest_size));
+ POSIX_GUARD(hmac->reset(ws));
+ POSIX_GUARD(hmac->update(ws, ws->digest0, digest_size));
/* Add the label + seed and compute this round's A */
- GUARD(hmac->update(ws, label->data, label->size));
- GUARD(hmac->update(ws, seed_a->data, seed_a->size));
+ POSIX_GUARD(hmac->update(ws, label->data, label->size));
+ POSIX_GUARD(hmac->update(ws, seed_a->data, seed_a->size));
if (seed_b) {
- GUARD(hmac->update(ws, seed_b->data, seed_b->size));
+ POSIX_GUARD(hmac->update(ws, seed_b->data, seed_b->size));
if (seed_c) {
- GUARD(hmac->update(ws, seed_c->data, seed_c->size));
+ POSIX_GUARD(hmac->update(ws, seed_c->data, seed_c->size));
}
}
- GUARD(hmac->final(ws, ws->tls.digest1, digest_size));
+ POSIX_GUARD(hmac->final(ws, ws->digest1, digest_size));
uint32_t bytes_to_xor = MIN(outputlen, digest_size);
- for (int i = 0; i < bytes_to_xor; i++) {
- *output ^= ws->tls.digest1[i];
+ for (uint32_t i = 0; i < bytes_to_xor; i++) {
+ *output ^= ws->digest1[i];
output++;
outputlen--;
}
/* Stash a digest of A(N), in A(N), for the next round */
- GUARD(hmac->reset(ws));
- GUARD(hmac->update(ws, ws->tls.digest0, digest_size));
- GUARD(hmac->final(ws, ws->tls.digest0, digest_size));
+ POSIX_GUARD(hmac->reset(ws));
+ POSIX_GUARD(hmac->update(ws, ws->digest0, digest_size));
+ POSIX_GUARD(hmac->final(ws, ws->digest0, digest_size));
}
- GUARD(hmac->cleanup(ws));
+ POSIX_GUARD(hmac->cleanup(ws));
return 0;
}
-const struct s2n_p_hash_hmac *s2n_get_hmac_implementation() {
-#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
- return &s2n_hmac;
-#else
- return s2n_is_in_fips_mode() ? &s2n_evp_hmac : &s2n_hmac;
-#endif
+S2N_RESULT s2n_prf_new(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_EQ(conn->prf_space, NULL);
+
+ DEFER_CLEANUP(struct s2n_blob mem = { 0 }, s2n_free);
+ RESULT_GUARD_POSIX(s2n_realloc(&mem, sizeof(struct s2n_prf_working_space)));
+ RESULT_GUARD_POSIX(s2n_blob_zero(&mem));
+ conn->prf_space = (struct s2n_prf_working_space*)(void*) mem.data;
+ ZERO_TO_DISABLE_DEFER_CLEANUP(mem);
+
+ /* Allocate the hmac state */
+ const struct s2n_p_hash_hmac *hmac_impl = s2n_get_hmac_implementation();
+ RESULT_GUARD_POSIX(hmac_impl->alloc(conn->prf_space));
+ return S2N_RESULT_OK;
}
-int s2n_prf_new(struct s2n_connection *conn)
+S2N_RESULT s2n_prf_wipe(struct s2n_connection *conn)
{
- /* Set p_hash_hmac_impl on initial prf creation.
- * When in FIPS mode, the EVP API's must be used for the p_hash HMAC.
- */
- conn->prf_space.tls.p_hash_hmac_impl = s2n_get_hmac_implementation();
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->prf_space);
+
+ const struct s2n_p_hash_hmac *hmac_impl = s2n_get_hmac_implementation();
+ RESULT_GUARD_POSIX(hmac_impl->reset(conn->prf_space));
- return conn->prf_space.tls.p_hash_hmac_impl->alloc(&conn->prf_space);
+ return S2N_RESULT_OK;
}
-int s2n_prf_free(struct s2n_connection *conn)
+S2N_RESULT s2n_prf_free(struct s2n_connection *conn)
{
- /* Ensure that p_hash_hmac_impl is set, as it may have been reset for prf_space on s2n_connection_wipe.
- * When in FIPS mode, the EVP API's must be used for the p_hash HMAC.
- */
- conn->prf_space.tls.p_hash_hmac_impl = s2n_get_hmac_implementation();
+ RESULT_ENSURE_REF(conn);
+ if (conn->prf_space == NULL) {
+ return S2N_RESULT_OK;
+ }
- return conn->prf_space.tls.p_hash_hmac_impl->free(&conn->prf_space);
+ const struct s2n_p_hash_hmac *hmac_impl = s2n_get_hmac_implementation();
+ RESULT_GUARD_POSIX(hmac_impl->free(conn->prf_space));
+
+ RESULT_GUARD_POSIX(s2n_free_object((uint8_t **) &conn->prf_space, sizeof(struct s2n_prf_working_space)));
+ return S2N_RESULT_OK;
}
static int s2n_prf(struct s2n_connection *conn, struct s2n_blob *secret, struct s2n_blob *label, struct s2n_blob *seed_a,
struct s2n_blob *seed_b, struct s2n_blob *seed_c, struct s2n_blob *out)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(secret);
+ POSIX_ENSURE_REF(conn->prf_space);
+
 /* seed_a is always required; seed_b is optional; if seed_c is provided, seed_b must also be provided */
S2N_ERROR_IF(seed_a == NULL, S2N_ERR_PRF_INVALID_SEED);
S2N_ERROR_IF(seed_b == NULL && seed_c != NULL, S2N_ERR_PRF_INVALID_SEED);
if (conn->actual_protocol_version == S2N_SSLv3) {
- return s2n_sslv3_prf(&conn->prf_space, secret, seed_a, seed_b, seed_c, out);
+ return s2n_sslv3_prf(conn, secret, seed_a, seed_b, seed_c, out);
}
/* We zero the out blob because p_hash works by XOR'ing with the existing
@@ -363,32 +476,27 @@ static int s2n_prf(struct s2n_connection *conn, struct s2n_blob *secret, struct
* the right values. When we call it twice in the regular case, the two
 * outputs will be XOR'd just as the TLS 1.0 and 1.1 RFCs require.
*/
- GUARD(s2n_blob_zero(out));
-
- /* Ensure that p_hash_hmac_impl is set, as it may have been reset for prf_space on s2n_connection_wipe.
- * When in FIPS mode, the EVP API's must be used for the p_hash HMAC.
- */
- conn->prf_space.tls.p_hash_hmac_impl = s2n_get_hmac_implementation();
-
+ POSIX_GUARD(s2n_blob_zero(out));
+
if (conn->actual_protocol_version == S2N_TLS12) {
- return s2n_p_hash(&conn->prf_space, conn->secure.cipher_suite->prf_alg, secret, label, seed_a, seed_b,
+ return s2n_p_hash(conn->prf_space, conn->secure.cipher_suite->prf_alg, secret, label, seed_a, seed_b,
seed_c, out);
}
struct s2n_blob half_secret = {.data = secret->data,.size = (secret->size + 1) / 2 };
- GUARD(s2n_p_hash(&conn->prf_space, S2N_HMAC_MD5, &half_secret, label, seed_a, seed_b, seed_c, out));
+ POSIX_GUARD(s2n_p_hash(conn->prf_space, S2N_HMAC_MD5, &half_secret, label, seed_a, seed_b, seed_c, out));
half_secret.data += secret->size - half_secret.size;
- GUARD(s2n_p_hash(&conn->prf_space, S2N_HMAC_SHA1, &half_secret, label, seed_a, seed_b, seed_c, out));
+ POSIX_GUARD(s2n_p_hash(conn->prf_space, S2N_HMAC_SHA1, &half_secret, label, seed_a, seed_b, seed_c, out));
return 0;
}
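For reference, the zero-then-XOR structure in s2n_prf() above follows the PRF definition from the TLS 1.0/1.1 RFCs (RFC 2246/4346); this is a worked restatement of the standard, not s2n-specific behavior:

    PRF(secret, label, seed) = P_MD5(S1, label + seed) XOR P_SHA1(S2, label + seed)

where S1 and S2 are the first and second halves of the secret; when the secret length is odd, the two halves share one middle byte. That sharing is exactly what the (secret->size + 1) / 2 size and the half_secret.data += secret->size - half_secret.size adjustment implement, and zeroing `out` first lets the two P_hash passes be combined by XOR-ing into the same buffer. TLS 1.2 instead uses a single P_hash keyed with the cipher suite's PRF digest, so only one pass is needed there.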
int s2n_tls_prf_master_secret(struct s2n_connection *conn, struct s2n_blob *premaster_secret)
{
- struct s2n_blob client_random = {.size = sizeof(conn->secure.client_random), .data = conn->secure.client_random};
- struct s2n_blob server_random = {.size = sizeof(conn->secure.server_random), .data = conn->secure.server_random};
- struct s2n_blob master_secret = {.size = sizeof(conn->secure.master_secret), .data = conn->secure.master_secret};
+ struct s2n_blob client_random = {.size = sizeof(conn->handshake_params.client_random), .data = conn->handshake_params.client_random};
+ struct s2n_blob server_random = {.size = sizeof(conn->handshake_params.server_random), .data = conn->handshake_params.server_random};
+ struct s2n_blob master_secret = {.size = sizeof(conn->secrets.tls12.master_secret), .data = conn->secrets.tls12.master_secret};
uint8_t master_secret_label[] = "master secret";
struct s2n_blob label = {.size = sizeof(master_secret_label) - 1, .data = master_secret_label};
@@ -398,18 +506,103 @@ int s2n_tls_prf_master_secret(struct s2n_connection *conn, struct s2n_blob *prem
int s2n_hybrid_prf_master_secret(struct s2n_connection *conn, struct s2n_blob *premaster_secret)
{
- struct s2n_blob client_random = {.size = sizeof(conn->secure.client_random), .data = conn->secure.client_random};
- struct s2n_blob server_random = {.size = sizeof(conn->secure.server_random), .data = conn->secure.server_random};
- struct s2n_blob master_secret = {.size = sizeof(conn->secure.master_secret), .data = conn->secure.master_secret};
+ struct s2n_blob client_random = {.size = sizeof(conn->handshake_params.client_random), .data = conn->handshake_params.client_random};
+ struct s2n_blob server_random = {.size = sizeof(conn->handshake_params.server_random), .data = conn->handshake_params.server_random};
+ struct s2n_blob master_secret = {.size = sizeof(conn->secrets.tls12.master_secret), .data = conn->secrets.tls12.master_secret};
uint8_t master_secret_label[] = "hybrid master secret";
struct s2n_blob label = {.size = sizeof(master_secret_label) - 1, .data = master_secret_label};
- return s2n_prf(conn, premaster_secret, &label, &client_random, &server_random, &conn->secure.client_key_exchange_message, &master_secret);
+ return s2n_prf(conn, premaster_secret, &label, &client_random, &server_random, &conn->kex_params.client_key_exchange_message, &master_secret);
}
-static int s2n_sslv3_finished(struct s2n_connection *conn, uint8_t prefix[4], struct s2n_hash_state *md5, struct s2n_hash_state *sha1, uint8_t * out)
+int s2n_prf_calculate_master_secret(struct s2n_connection *conn, struct s2n_blob *premaster_secret)
{
+ POSIX_ENSURE_REF(conn);
+
+ POSIX_ENSURE_EQ(s2n_conn_get_current_message_type(conn), CLIENT_KEY);
+
+ if(!conn->ems_negotiated) {
+ POSIX_GUARD(s2n_tls_prf_master_secret(conn, premaster_secret));
+ return S2N_SUCCESS;
+ }
+
+ /* Only the client writes the Client Key Exchange message */
+ if (conn->mode == S2N_CLIENT) {
+ POSIX_GUARD(s2n_handshake_finish_header(&conn->handshake.io));
+ }
+ struct s2n_stuffer client_key_message = conn->handshake.io;
+ POSIX_GUARD(s2n_stuffer_reread(&client_key_message));
+ uint32_t client_key_message_size = s2n_stuffer_data_available(&client_key_message);
+ struct s2n_blob client_key_blob = { 0 };
+ POSIX_GUARD(s2n_blob_init(&client_key_blob, client_key_message.blob.data, client_key_message_size));
+
+ uint8_t data[S2N_MAX_DIGEST_LEN] = { 0 };
+ struct s2n_blob digest = { 0 };
+ POSIX_GUARD(s2n_blob_init(&digest, data, sizeof(data)));
+ if (conn->actual_protocol_version < S2N_TLS12) {
+ uint8_t sha1_data[S2N_MAX_DIGEST_LEN] = { 0 };
+ struct s2n_blob sha1_digest = { 0 };
+ POSIX_GUARD(s2n_blob_init(&sha1_digest, sha1_data, sizeof(sha1_data)));
+ POSIX_GUARD_RESULT(s2n_prf_get_digest_for_ems(conn, &client_key_blob, S2N_HASH_MD5, &digest));
+ POSIX_GUARD_RESULT(s2n_prf_get_digest_for_ems(conn, &client_key_blob, S2N_HASH_SHA1, &sha1_digest));
+ POSIX_GUARD_RESULT(s2n_tls_prf_extended_master_secret(conn, premaster_secret, &digest, &sha1_digest));
+ } else {
+ s2n_hmac_algorithm prf_alg = conn->secure.cipher_suite->prf_alg;
+ s2n_hash_algorithm hash_alg = 0;
+ POSIX_GUARD(s2n_hmac_hash_alg(prf_alg, &hash_alg));
+ POSIX_GUARD_RESULT(s2n_prf_get_digest_for_ems(conn, &client_key_blob, hash_alg, &digest));
+ POSIX_GUARD_RESULT(s2n_tls_prf_extended_master_secret(conn, premaster_secret, &digest, NULL));
+ }
+ return S2N_SUCCESS;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc7627#section-4
+ *# When the extended master secret extension is negotiated in a full
+ *# handshake, the "master_secret" is computed as
+ *#
+ *# master_secret = PRF(pre_master_secret, "extended master secret",
+ *# session_hash)
+ *# [0..47];
+ */
+S2N_RESULT s2n_tls_prf_extended_master_secret(struct s2n_connection *conn, struct s2n_blob *premaster_secret, struct s2n_blob *session_hash, struct s2n_blob *sha1_hash)
+{
+ struct s2n_blob extended_master_secret = {.size = sizeof(conn->secrets.tls12.master_secret), .data = conn->secrets.tls12.master_secret};
+
+ uint8_t extended_master_secret_label[] = "extended master secret";
+ /* Subtract one from the label size to remove the "\0" */
+ struct s2n_blob label = {.size = sizeof(extended_master_secret_label) - 1, .data = extended_master_secret_label};
+
+ RESULT_GUARD_POSIX(s2n_prf(conn, premaster_secret, &label, session_hash, sha1_hash, NULL, &extended_master_secret));
+
+ return S2N_RESULT_OK;
+}
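For the pre-TLS 1.2 branch above, RFC 7627 defines the session hash as the concatenation of the MD5 and SHA-1 transcript hashes, which is why s2n_prf_calculate_master_secret() computes two digests and passes the SHA-1 one through the sha1_hash parameter. A worked restatement of both cases (notation from RFC 7627, not s2n-specific):

    TLS 1.2:       master_secret = PRF(pre_master_secret, "extended master secret",
                                       Hash(handshake_messages))[0..47]
    TLS 1.0/1.1:   master_secret = PRF(pre_master_secret, "extended master secret",
                                       MD5(handshake_messages) + SHA1(handshake_messages))[0..47]

where handshake_messages covers everything from the ClientHello up to and including the ClientKeyExchange, matching the client_key_blob appended to the transcript hash in s2n_prf_calculate_master_secret().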
+
+S2N_RESULT s2n_prf_get_digest_for_ems(struct s2n_connection *conn, struct s2n_blob *message, s2n_hash_algorithm hash_alg, struct s2n_blob *output)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->handshake.hashes);
+ RESULT_ENSURE_REF(output);
+
+ struct s2n_hash_state *hash_state = &conn->handshake.hashes->hash_workspace;
+ RESULT_GUARD(s2n_handshake_copy_hash_state(conn, hash_alg, hash_state));
+ RESULT_GUARD_POSIX(s2n_hash_update(hash_state, message->data, message->size));
+
+ uint8_t digest_size = 0;
+ RESULT_GUARD_POSIX(s2n_hash_digest_size(hash_alg, &digest_size));
+ RESULT_ENSURE_GTE(output->size, digest_size);
+ RESULT_GUARD_POSIX(s2n_hash_digest(hash_state, output->data, digest_size));
+ output->size = digest_size;
+
+ return S2N_RESULT_OK;
+}
+
+static int s2n_sslv3_finished(struct s2n_connection *conn, uint8_t prefix[4], struct s2n_hash_state *hash_workspace, uint8_t * out)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+
uint8_t xorpad1[48] =
{ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
@@ -421,55 +614,64 @@ static int s2n_sslv3_finished(struct s2n_connection *conn, uint8_t prefix[4], st
uint8_t *md5_digest = out;
uint8_t *sha_digest = out + MD5_DIGEST_LENGTH;
- lte_check(MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, sizeof(conn->handshake.client_finished));
-
- GUARD(s2n_hash_update(md5, prefix, 4));
- GUARD(s2n_hash_update(md5, conn->secure.master_secret, sizeof(conn->secure.master_secret)));
- GUARD(s2n_hash_update(md5, xorpad1, 48));
- GUARD(s2n_hash_digest(md5, md5_digest, MD5_DIGEST_LENGTH));
- GUARD(s2n_hash_reset(md5));
- GUARD(s2n_hash_update(md5, conn->secure.master_secret, sizeof(conn->secure.master_secret)));
- GUARD(s2n_hash_update(md5, xorpad2, 48));
- GUARD(s2n_hash_update(md5, md5_digest, MD5_DIGEST_LENGTH));
- GUARD(s2n_hash_digest(md5, md5_digest, MD5_DIGEST_LENGTH));
- GUARD(s2n_hash_reset(md5));
-
- GUARD(s2n_hash_update(sha1, prefix, 4));
- GUARD(s2n_hash_update(sha1, conn->secure.master_secret, sizeof(conn->secure.master_secret)));
- GUARD(s2n_hash_update(sha1, xorpad1, 40));
- GUARD(s2n_hash_digest(sha1, sha_digest, SHA_DIGEST_LENGTH));
- GUARD(s2n_hash_reset(sha1));
- GUARD(s2n_hash_update(sha1, conn->secure.master_secret, sizeof(conn->secure.master_secret)));
- GUARD(s2n_hash_update(sha1, xorpad2, 40));
- GUARD(s2n_hash_update(sha1, sha_digest, SHA_DIGEST_LENGTH));
- GUARD(s2n_hash_digest(sha1, sha_digest, SHA_DIGEST_LENGTH));
- GUARD(s2n_hash_reset(sha1));
+ POSIX_ENSURE_LTE(MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, sizeof(conn->handshake.client_finished));
+
+ struct s2n_hash_state *md5 = hash_workspace;
+ POSIX_GUARD(s2n_hash_copy(md5, &conn->handshake.hashes->md5));
+ POSIX_GUARD(s2n_hash_update(md5, prefix, 4));
+ POSIX_GUARD(s2n_hash_update(md5, conn->secrets.tls12.master_secret, sizeof(conn->secrets.tls12.master_secret)));
+ POSIX_GUARD(s2n_hash_update(md5, xorpad1, 48));
+ POSIX_GUARD(s2n_hash_digest(md5, md5_digest, MD5_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_reset(md5));
+ POSIX_GUARD(s2n_hash_update(md5, conn->secrets.tls12.master_secret, sizeof(conn->secrets.tls12.master_secret)));
+ POSIX_GUARD(s2n_hash_update(md5, xorpad2, 48));
+ POSIX_GUARD(s2n_hash_update(md5, md5_digest, MD5_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_digest(md5, md5_digest, MD5_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_reset(md5));
+
+ struct s2n_hash_state *sha1 = hash_workspace;
+ POSIX_GUARD(s2n_hash_copy(sha1, &conn->handshake.hashes->sha1));
+ POSIX_GUARD(s2n_hash_update(sha1, prefix, 4));
+ POSIX_GUARD(s2n_hash_update(sha1, conn->secrets.tls12.master_secret, sizeof(conn->secrets.tls12.master_secret)));
+ POSIX_GUARD(s2n_hash_update(sha1, xorpad1, 40));
+ POSIX_GUARD(s2n_hash_digest(sha1, sha_digest, SHA_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_reset(sha1));
+ POSIX_GUARD(s2n_hash_update(sha1, conn->secrets.tls12.master_secret, sizeof(conn->secrets.tls12.master_secret)));
+ POSIX_GUARD(s2n_hash_update(sha1, xorpad2, 40));
+ POSIX_GUARD(s2n_hash_update(sha1, sha_digest, SHA_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_digest(sha1, sha_digest, SHA_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_reset(sha1));
return 0;
}
static int s2n_sslv3_client_finished(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+
uint8_t prefix[4] = { 0x43, 0x4c, 0x4e, 0x54 };
- lte_check(MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, sizeof(conn->handshake.client_finished));
- GUARD(s2n_hash_copy(&conn->handshake.prf_md5_hash_copy, &conn->handshake.md5));
- GUARD(s2n_hash_copy(&conn->handshake.prf_sha1_hash_copy, &conn->handshake.sha1));
- return s2n_sslv3_finished(conn, prefix, &conn->handshake.prf_md5_hash_copy, &conn->handshake.prf_sha1_hash_copy, conn->handshake.client_finished);
+ POSIX_ENSURE_LTE(MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, sizeof(conn->handshake.client_finished));
+ return s2n_sslv3_finished(conn, prefix, &conn->handshake.hashes->hash_workspace, conn->handshake.client_finished);
}
static int s2n_sslv3_server_finished(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+
uint8_t prefix[4] = { 0x53, 0x52, 0x56, 0x52 };
- lte_check(MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, sizeof(conn->handshake.server_finished));
- GUARD(s2n_hash_copy(&conn->handshake.prf_md5_hash_copy, &conn->handshake.md5));
- GUARD(s2n_hash_copy(&conn->handshake.prf_sha1_hash_copy, &conn->handshake.sha1));
- return s2n_sslv3_finished(conn, prefix, &conn->handshake.prf_md5_hash_copy, &conn->handshake.prf_sha1_hash_copy, conn->handshake.server_finished);
+ POSIX_ENSURE_LTE(MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, sizeof(conn->handshake.server_finished));
+ return s2n_sslv3_finished(conn, prefix, &conn->handshake.hashes->hash_workspace, conn->handshake.server_finished);
}
int s2n_prf_client_finished(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+
struct s2n_blob master_secret, md5, sha;
uint8_t md5_digest[MD5_DIGEST_LENGTH];
uint8_t sha_digest[SHA384_DIGEST_LENGTH];
@@ -486,35 +688,35 @@ int s2n_prf_client_finished(struct s2n_connection *conn)
label.data = client_finished_label;
label.size = sizeof(client_finished_label) - 1;
- master_secret.data = conn->secure.master_secret;
- master_secret.size = sizeof(conn->secure.master_secret);
+ master_secret.data = conn->secrets.tls12.master_secret;
+ master_secret.size = sizeof(conn->secrets.tls12.master_secret);
if (conn->actual_protocol_version == S2N_TLS12) {
switch (conn->secure.cipher_suite->prf_alg) {
case S2N_HMAC_SHA256:
- GUARD(s2n_hash_copy(&conn->handshake.prf_tls12_hash_copy, &conn->handshake.sha256));
- GUARD(s2n_hash_digest(&conn->handshake.prf_tls12_hash_copy, sha_digest, SHA256_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_copy(&conn->handshake.hashes->hash_workspace, &conn->handshake.hashes->sha256));
+ POSIX_GUARD(s2n_hash_digest(&conn->handshake.hashes->hash_workspace, sha_digest, SHA256_DIGEST_LENGTH));
sha.size = SHA256_DIGEST_LENGTH;
break;
case S2N_HMAC_SHA384:
- GUARD(s2n_hash_copy(&conn->handshake.prf_tls12_hash_copy, &conn->handshake.sha384));
- GUARD(s2n_hash_digest(&conn->handshake.prf_tls12_hash_copy, sha_digest, SHA384_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_copy(&conn->handshake.hashes->hash_workspace, &conn->handshake.hashes->sha384));
+ POSIX_GUARD(s2n_hash_digest(&conn->handshake.hashes->hash_workspace, sha_digest, SHA384_DIGEST_LENGTH));
sha.size = SHA384_DIGEST_LENGTH;
break;
default:
- S2N_ERROR(S2N_ERR_PRF_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_PRF_INVALID_ALGORITHM);
}
sha.data = sha_digest;
return s2n_prf(conn, &master_secret, &label, &sha, NULL, NULL, &client_finished);
}
- GUARD(s2n_hash_copy(&conn->handshake.prf_md5_hash_copy, &conn->handshake.md5));
- GUARD(s2n_hash_copy(&conn->handshake.prf_sha1_hash_copy, &conn->handshake.sha1));
-
- GUARD(s2n_hash_digest(&conn->handshake.prf_md5_hash_copy, md5_digest, MD5_DIGEST_LENGTH));
- GUARD(s2n_hash_digest(&conn->handshake.prf_sha1_hash_copy, sha_digest, SHA_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_copy(&conn->handshake.hashes->hash_workspace, &conn->handshake.hashes->md5));
+ POSIX_GUARD(s2n_hash_digest(&conn->handshake.hashes->hash_workspace, md5_digest, MD5_DIGEST_LENGTH));
md5.data = md5_digest;
md5.size = MD5_DIGEST_LENGTH;
+
+ POSIX_GUARD(s2n_hash_copy(&conn->handshake.hashes->hash_workspace, &conn->handshake.hashes->sha1));
+ POSIX_GUARD(s2n_hash_digest(&conn->handshake.hashes->hash_workspace, sha_digest, SHA_DIGEST_LENGTH));
sha.data = sha_digest;
sha.size = SHA_DIGEST_LENGTH;
@@ -523,6 +725,9 @@ int s2n_prf_client_finished(struct s2n_connection *conn)
int s2n_prf_server_finished(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+
struct s2n_blob master_secret, md5, sha;
uint8_t md5_digest[MD5_DIGEST_LENGTH];
uint8_t sha_digest[SHA384_DIGEST_LENGTH];
@@ -539,35 +744,35 @@ int s2n_prf_server_finished(struct s2n_connection *conn)
label.data = server_finished_label;
label.size = sizeof(server_finished_label) - 1;
- master_secret.data = conn->secure.master_secret;
- master_secret.size = sizeof(conn->secure.master_secret);
+ master_secret.data = conn->secrets.tls12.master_secret;
+ master_secret.size = sizeof(conn->secrets.tls12.master_secret);
if (conn->actual_protocol_version == S2N_TLS12) {
switch (conn->secure.cipher_suite->prf_alg) {
case S2N_HMAC_SHA256:
- GUARD(s2n_hash_copy(&conn->handshake.prf_tls12_hash_copy, &conn->handshake.sha256));
- GUARD(s2n_hash_digest(&conn->handshake.prf_tls12_hash_copy, sha_digest, SHA256_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_copy(&conn->handshake.hashes->hash_workspace, &conn->handshake.hashes->sha256));
+ POSIX_GUARD(s2n_hash_digest(&conn->handshake.hashes->hash_workspace, sha_digest, SHA256_DIGEST_LENGTH));
sha.size = SHA256_DIGEST_LENGTH;
break;
case S2N_HMAC_SHA384:
- GUARD(s2n_hash_copy(&conn->handshake.prf_tls12_hash_copy, &conn->handshake.sha384));
- GUARD(s2n_hash_digest(&conn->handshake.prf_tls12_hash_copy, sha_digest, SHA384_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_copy(&conn->handshake.hashes->hash_workspace, &conn->handshake.hashes->sha384));
+ POSIX_GUARD(s2n_hash_digest(&conn->handshake.hashes->hash_workspace, sha_digest, SHA384_DIGEST_LENGTH));
sha.size = SHA384_DIGEST_LENGTH;
break;
default:
- S2N_ERROR(S2N_ERR_PRF_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_PRF_INVALID_ALGORITHM);
}
sha.data = sha_digest;
return s2n_prf(conn, &master_secret, &label, &sha, NULL, NULL, &server_finished);
}
- GUARD(s2n_hash_copy(&conn->handshake.prf_md5_hash_copy, &conn->handshake.md5));
- GUARD(s2n_hash_copy(&conn->handshake.prf_sha1_hash_copy, &conn->handshake.sha1));
-
- GUARD(s2n_hash_digest(&conn->handshake.prf_md5_hash_copy, md5_digest, MD5_DIGEST_LENGTH));
- GUARD(s2n_hash_digest(&conn->handshake.prf_sha1_hash_copy, sha_digest, SHA_DIGEST_LENGTH));
+ POSIX_GUARD(s2n_hash_copy(&conn->handshake.hashes->hash_workspace, &conn->handshake.hashes->md5));
+ POSIX_GUARD(s2n_hash_digest(&conn->handshake.hashes->hash_workspace, md5_digest, MD5_DIGEST_LENGTH));
md5.data = md5_digest;
md5.size = MD5_DIGEST_LENGTH;
+
+ POSIX_GUARD(s2n_hash_copy(&conn->handshake.hashes->hash_workspace, &conn->handshake.hashes->sha1));
+ POSIX_GUARD(s2n_hash_digest(&conn->handshake.hashes->hash_workspace, sha_digest, SHA_DIGEST_LENGTH));
sha.data = sha_digest;
sha.size = SHA_DIGEST_LENGTH;
@@ -579,12 +784,12 @@ static int s2n_prf_make_client_key(struct s2n_connection *conn, struct s2n_stuff
struct s2n_blob client_key = {0};
client_key.size = conn->secure.cipher_suite->record_alg->cipher->key_material_size;
client_key.data = s2n_stuffer_raw_read(key_material, client_key.size);
- notnull_check(client_key.data);
+ POSIX_ENSURE_REF(client_key.data);
if (conn->mode == S2N_CLIENT) {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(&conn->secure.client_key, &client_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(&conn->secure.client_key, &client_key));
} else {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(&conn->secure.client_key, &client_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(&conn->secure.client_key, &client_key));
}
return 0;
@@ -595,12 +800,12 @@ static int s2n_prf_make_server_key(struct s2n_connection *conn, struct s2n_stuff
struct s2n_blob server_key = {0};
server_key.size = conn->secure.cipher_suite->record_alg->cipher->key_material_size;
server_key.data = s2n_stuffer_raw_read(key_material, server_key.size);
- notnull_check(server_key.data);
+ POSIX_ENSURE_REF(server_key.data);
if (conn->mode == S2N_SERVER) {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(&conn->secure.server_key, &server_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(&conn->secure.server_key, &server_key));
} else {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(&conn->secure.server_key, &server_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(&conn->secure.server_key, &server_key));
}
return 0;
@@ -608,58 +813,58 @@ static int s2n_prf_make_server_key(struct s2n_connection *conn, struct s2n_stuff
int s2n_prf_key_expansion(struct s2n_connection *conn)
{
- struct s2n_blob client_random = {.data = conn->secure.client_random,.size = sizeof(conn->secure.client_random) };
- struct s2n_blob server_random = {.data = conn->secure.server_random,.size = sizeof(conn->secure.server_random) };
- struct s2n_blob master_secret = {.data = conn->secure.master_secret,.size = sizeof(conn->secure.master_secret) };
+ struct s2n_blob client_random = {.data = conn->handshake_params.client_random,.size = sizeof(conn->handshake_params.client_random) };
+ struct s2n_blob server_random = {.data = conn->handshake_params.server_random,.size = sizeof(conn->handshake_params.server_random) };
+ struct s2n_blob master_secret = {.data = conn->secrets.tls12.master_secret,.size = sizeof(conn->secrets.tls12.master_secret) };
struct s2n_blob label, out;
uint8_t key_expansion_label[] = "key expansion";
uint8_t key_block[S2N_MAX_KEY_BLOCK_LEN];
label.data = key_expansion_label;
label.size = sizeof(key_expansion_label) - 1;
- GUARD(s2n_blob_init(&out, key_block, sizeof(key_block)));
+ POSIX_GUARD(s2n_blob_init(&out, key_block, sizeof(key_block)));
struct s2n_stuffer key_material = {0};
- GUARD(s2n_prf(conn, &master_secret, &label, &server_random, &client_random, NULL, &out));
- GUARD(s2n_stuffer_init(&key_material, &out));
- GUARD(s2n_stuffer_write(&key_material, &out));
+ POSIX_GUARD(s2n_prf(conn, &master_secret, &label, &server_random, &client_random, NULL, &out));
+ POSIX_GUARD(s2n_stuffer_init(&key_material, &out));
+ POSIX_GUARD(s2n_stuffer_write(&key_material, &out));
- ENSURE_POSIX(conn->secure.cipher_suite->available, S2N_ERR_PRF_INVALID_ALGORITHM);
- GUARD(conn->secure.cipher_suite->record_alg->cipher->init(&conn->secure.client_key));
- GUARD(conn->secure.cipher_suite->record_alg->cipher->init(&conn->secure.server_key));
+ POSIX_ENSURE(conn->secure.cipher_suite->available, S2N_ERR_PRF_INVALID_ALGORITHM);
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->init(&conn->secure.client_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->init(&conn->secure.server_key));
/* Check that we have a valid MAC and key size */
uint8_t mac_size;
if (conn->secure.cipher_suite->record_alg->cipher->type == S2N_COMPOSITE) {
mac_size = conn->secure.cipher_suite->record_alg->cipher->io.comp.mac_key_size;
} else {
- GUARD(s2n_hmac_digest_size(conn->secure.cipher_suite->record_alg->hmac_alg, &mac_size));
+ POSIX_GUARD(s2n_hmac_digest_size(conn->secure.cipher_suite->record_alg->hmac_alg, &mac_size));
}
/* Seed the client MAC */
uint8_t *client_mac_write_key = s2n_stuffer_raw_read(&key_material, mac_size);
- notnull_check(client_mac_write_key);
- GUARD(s2n_hmac_reset(&conn->secure.client_record_mac));
- GUARD(s2n_hmac_init(&conn->secure.client_record_mac, conn->secure.cipher_suite->record_alg->hmac_alg, client_mac_write_key, mac_size));
+ POSIX_ENSURE_REF(client_mac_write_key);
+ POSIX_GUARD(s2n_hmac_reset(&conn->secure.client_record_mac));
+ POSIX_GUARD(s2n_hmac_init(&conn->secure.client_record_mac, conn->secure.cipher_suite->record_alg->hmac_alg, client_mac_write_key, mac_size));
/* Seed the server MAC */
uint8_t *server_mac_write_key = s2n_stuffer_raw_read(&key_material, mac_size);
- notnull_check(server_mac_write_key);
- GUARD(s2n_hmac_reset(&conn->secure.server_record_mac));
- GUARD(s2n_hmac_init(&conn->secure.server_record_mac, conn->secure.cipher_suite->record_alg->hmac_alg, server_mac_write_key, mac_size));
+ POSIX_ENSURE_REF(server_mac_write_key);
+ POSIX_GUARD(s2n_hmac_reset(&conn->secure.server_record_mac));
+ POSIX_GUARD(s2n_hmac_init(&conn->secure.server_record_mac, conn->secure.cipher_suite->record_alg->hmac_alg, server_mac_write_key, mac_size));
/* Make the client key */
- GUARD(s2n_prf_make_client_key(conn, &key_material));
+ POSIX_GUARD(s2n_prf_make_client_key(conn, &key_material));
/* Make the server key */
- GUARD(s2n_prf_make_server_key(conn, &key_material));
+ POSIX_GUARD(s2n_prf_make_server_key(conn, &key_material));
/* Composite CBC does MAC inside the cipher, pass it the MAC key.
* Must happen after setting encryption/decryption keys.
*/
if (conn->secure.cipher_suite->record_alg->cipher->type == S2N_COMPOSITE) {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->io.comp.set_mac_write_key(&conn->secure.server_key, server_mac_write_key, mac_size));
- GUARD(conn->secure.cipher_suite->record_alg->cipher->io.comp.set_mac_write_key(&conn->secure.client_key, client_mac_write_key, mac_size));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->io.comp.set_mac_write_key(&conn->secure.server_key, server_mac_write_key, mac_size));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->io.comp.set_mac_write_key(&conn->secure.client_key, client_mac_write_key, mac_size));
}
/* TLS >= 1.1 has no implicit IVs for non AEAD ciphers */
@@ -685,8 +890,8 @@ int s2n_prf_key_expansion(struct s2n_connection *conn)
struct s2n_blob client_implicit_iv = {.data = conn->secure.client_implicit_iv,.size = implicit_iv_size };
struct s2n_blob server_implicit_iv = {.data = conn->secure.server_implicit_iv,.size = implicit_iv_size };
- GUARD(s2n_stuffer_read(&key_material, &client_implicit_iv));
- GUARD(s2n_stuffer_read(&key_material, &server_implicit_iv));
+ POSIX_GUARD(s2n_stuffer_read(&key_material, &client_implicit_iv));
+ POSIX_GUARD(s2n_stuffer_read(&key_material, &server_implicit_iv));
return 0;
}
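
The rewritten PRF code above never finalizes a running transcript hash directly: each digest is taken from a copy placed in conn->handshake.hashes->hash_workspace, so the original hash state keeps accumulating handshake messages. A minimal sketch of that copy-update-digest pattern follows; the helper name is hypothetical, and error handling mirrors the POSIX_GUARD style used above.

/* Hypothetical helper: digest the current SHA-256 transcript without
 * disturbing the running hash, using the shared hash workspace. */
static int example_digest_sha256_transcript(struct s2n_connection *conn,
        uint8_t *out, uint32_t out_len)
{
    POSIX_ENSURE_REF(conn);
    POSIX_ENSURE_REF(conn->handshake.hashes);
    POSIX_ENSURE_GTE(out_len, SHA256_DIGEST_LENGTH);

    struct s2n_hash_state *workspace = &conn->handshake.hashes->hash_workspace;
    POSIX_GUARD(s2n_hash_copy(workspace, &conn->handshake.hashes->sha256));
    POSIX_GUARD(s2n_hash_digest(workspace, out, SHA256_DIGEST_LENGTH));
    return S2N_SUCCESS;
}
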
diff --git a/contrib/restricted/aws/s2n/tls/s2n_prf.h b/contrib/restricted/aws/s2n/tls/s2n_prf.h
index a3679436b2..cdf8414328 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_prf.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_prf.h
@@ -26,25 +26,15 @@
/* Enough to support TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, 2*SHA384_DIGEST_LEN + 2*AES256_KEY_SIZE */
#define S2N_MAX_KEY_BLOCK_LEN 160
-struct p_hash_state {
+union p_hash_state {
struct s2n_hmac_state s2n_hmac;
struct s2n_evp_hmac_state evp_hmac;
};
struct s2n_prf_working_space {
- struct {
- const struct s2n_p_hash_hmac *p_hash_hmac_impl;
- struct p_hash_state p_hash;
- uint8_t digest0[S2N_MAX_DIGEST_LEN];
- uint8_t digest1[S2N_MAX_DIGEST_LEN];
- } tls;
-
- struct {
- struct s2n_hash_state md5;
- struct s2n_hash_state sha1;
- uint8_t md5_digest[MD5_DIGEST_LENGTH];
- uint8_t sha1_digest[SHA_DIGEST_LENGTH];
- } ssl3;
+ union p_hash_state p_hash;
+ uint8_t digest0[S2N_MAX_DIGEST_LEN];
+ uint8_t digest1[S2N_MAX_DIGEST_LEN];
};
/* The s2n p_hash implementation is abstracted to allow for separate implementations, using
@@ -61,10 +51,15 @@ struct s2n_p_hash_hmac {
#include "tls/s2n_connection.h"
-extern int s2n_prf_new(struct s2n_connection *conn);
-extern int s2n_prf_free(struct s2n_connection *conn);
+S2N_RESULT s2n_prf_new(struct s2n_connection *conn);
+S2N_RESULT s2n_prf_wipe(struct s2n_connection *conn);
+S2N_RESULT s2n_prf_free(struct s2n_connection *conn);
+
+int s2n_prf_calculate_master_secret(struct s2n_connection *conn, struct s2n_blob *premaster_secret);
extern int s2n_tls_prf_master_secret(struct s2n_connection *conn, struct s2n_blob *premaster_secret);
extern int s2n_hybrid_prf_master_secret(struct s2n_connection *conn, struct s2n_blob *premaster_secret);
+S2N_RESULT s2n_tls_prf_extended_master_secret(struct s2n_connection *conn, struct s2n_blob *premaster_secret, struct s2n_blob *session_hash, struct s2n_blob *sha1_hash);
+S2N_RESULT s2n_prf_get_digest_for_ems(struct s2n_connection *conn, struct s2n_blob *message, s2n_hash_algorithm hash_alg, struct s2n_blob *output);
extern int s2n_prf_key_expansion(struct s2n_connection *conn);
extern int s2n_prf_server_finished(struct s2n_connection *conn);
extern int s2n_prf_client_finished(struct s2n_connection *conn);
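
The header now mixes the two error-handling conventions seen throughout this diff: POSIX-style functions returning int, and S2N_RESULT functions guarded with the RESULT_* macros. A hedged sketch of how they interoperate; the wrapper below is hypothetical, only s2n_prf_wipe and the guard macros come from the code above.

/* int (POSIX) code consumes an S2N_RESULT callee via POSIX_GUARD_RESULT;
 * S2N_RESULT code consumes an int callee via RESULT_GUARD_POSIX. */
static int example_reset_prf(struct s2n_connection *conn)
{
    POSIX_ENSURE_REF(conn);
    POSIX_GUARD_RESULT(s2n_prf_wipe(conn));
    return S2N_SUCCESS;
}
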
diff --git a/contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.c b/contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.c
index e0157f4ae4..88212161c8 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.c
@@ -17,39 +17,146 @@
#include "error/s2n_errno.h"
#include "utils/s2n_safety.h"
-int s2n_blob_set_protocol_preferences(struct s2n_blob *application_protocols, const char *const *protocols, int protocol_count)
+S2N_RESULT s2n_protocol_preferences_read(struct s2n_stuffer *protocol_preferences, struct s2n_blob *protocol)
{
- struct s2n_stuffer protocol_stuffer = {0};
+ RESULT_ENSURE_REF(protocol_preferences);
+ RESULT_ENSURE_REF(protocol);
- GUARD(s2n_free(application_protocols));
+ uint8_t length = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(protocol_preferences, &length));
+ RESULT_ENSURE_GT(length, 0);
+ uint8_t *data = s2n_stuffer_raw_read(protocol_preferences, length);
+ RESULT_ENSURE_REF(data);
+
+ RESULT_GUARD_POSIX(s2n_blob_init(protocol, data, length));
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_protocol_preferences_contain(struct s2n_blob *protocol_preferences, struct s2n_blob *protocol, bool *contains)
+{
+ RESULT_ENSURE_REF(contains);
+ *contains = false;
+ RESULT_ENSURE_REF(protocol_preferences);
+ RESULT_ENSURE_REF(protocol);
+
+ struct s2n_stuffer app_protocols_stuffer = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&app_protocols_stuffer, protocol_preferences));
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_write(&app_protocols_stuffer, protocol_preferences->size));
+
+ while (s2n_stuffer_data_available(&app_protocols_stuffer) > 0) {
+ struct s2n_blob match_against = { 0 };
+ RESULT_GUARD(s2n_protocol_preferences_read(&app_protocols_stuffer, &match_against));
+
+ if (match_against.size == protocol->size && memcmp(match_against.data, protocol->data, protocol->size) == 0) {
+ *contains = true;
+ return S2N_RESULT_OK;
+ }
+ }
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_protocol_preferences_append(struct s2n_blob *application_protocols, const uint8_t *protocol, uint8_t protocol_len)
+{
+ RESULT_ENSURE_MUT(application_protocols);
+ RESULT_ENSURE_REF(protocol);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc7301#section-3.1
+ *# Empty strings
+ *# MUST NOT be included and byte strings MUST NOT be truncated.
+ */
+ RESULT_ENSURE(protocol_len != 0, S2N_ERR_INVALID_APPLICATION_PROTOCOL);
+
+ uint32_t prev_len = application_protocols->size;
+ uint32_t new_len = prev_len + /* len prefix */ 1 + protocol_len;
+ RESULT_ENSURE(new_len <= UINT16_MAX, S2N_ERR_INVALID_APPLICATION_PROTOCOL);
+
+ RESULT_GUARD_POSIX(s2n_realloc(application_protocols, new_len));
+
+ struct s2n_stuffer protocol_stuffer = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&protocol_stuffer, application_protocols));
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_write(&protocol_stuffer, prev_len));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(&protocol_stuffer, protocol_len));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_bytes(&protocol_stuffer, protocol, protocol_len));
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_protocol_preferences_set(struct s2n_blob *application_protocols, const char *const *protocols, int protocol_count)
+{
+ RESULT_ENSURE_MUT(application_protocols);
+
+ /* NULL value indicates no preference, so free the previous blob */
if (protocols == NULL || protocol_count == 0) {
- /* NULL value indicates no preference, so nothing to do */
- return 0;
+ RESULT_GUARD_POSIX(s2n_free(application_protocols));
+ return S2N_RESULT_OK;
}
- GUARD(s2n_stuffer_growable_alloc(&protocol_stuffer, 256));
- for (int i = 0; i < protocol_count; i++) {
+ DEFER_CLEANUP(struct s2n_blob new_protocols = { 0 }, s2n_free);
+
+ /* Allocate enough space to avoid a reallocation for every entry
+ *
+ * We assume that each protocol is most likely 8 bytes or less.
+ * If it ends up being larger, we will expand the blob automatically
+ * in the append method.
+ */
+ RESULT_GUARD_POSIX(s2n_realloc(&new_protocols, protocol_count * 8));
+
+ /* set the size back to 0 so we start at the beginning.
+ * s2n_realloc will just update the size field here
+ */
+ RESULT_GUARD_POSIX(s2n_realloc(&new_protocols, 0));
+
+ for (size_t i = 0; i < protocol_count; i++) {
+ const uint8_t * protocol = (const uint8_t *)protocols[i];
size_t length = strlen(protocols[i]);
- uint8_t protocol[255];
- S2N_ERROR_IF(length > 255 || (s2n_stuffer_data_available(&protocol_stuffer) + length + 1) > 65535, S2N_ERR_APPLICATION_PROTOCOL_TOO_LONG);
- memcpy_check(protocol, protocols[i], length);
- GUARD(s2n_stuffer_write_uint8(&protocol_stuffer, length));
- GUARD(s2n_stuffer_write_bytes(&protocol_stuffer, protocol, length));
+ /**
+ *= https://tools.ietf.org/rfc/rfc7301#section-3.1
+ *# Empty strings
+ *# MUST NOT be included and byte strings MUST NOT be truncated.
+ */
+ RESULT_ENSURE(length < 256, S2N_ERR_INVALID_APPLICATION_PROTOCOL);
+
+ RESULT_GUARD(s2n_protocol_preferences_append(&new_protocols, protocol, (uint8_t)length));
}
- GUARD(s2n_stuffer_extract_blob(&protocol_stuffer, application_protocols));
- GUARD(s2n_stuffer_free(&protocol_stuffer));
- return 0;
+ /* now we can free the previous list since we've validated all new input */
+ RESULT_GUARD_POSIX(s2n_free(application_protocols));
+
+ /* update the connection/config application_protocols with the newly allocated blob */
+ *application_protocols = new_protocols;
+
+ /* zero out new_protocols so the DEFER_CLEANUP from above doesn't free
+ * the blob that we created and assigned to application_protocols
+ */
+ /* cppcheck-suppress unreadVariable */
+ new_protocols = (struct s2n_blob){ 0 };
+
+ return S2N_RESULT_OK;
}
int s2n_config_set_protocol_preferences(struct s2n_config *config, const char *const *protocols, int protocol_count)
{
- return s2n_blob_set_protocol_preferences(&config->application_protocols, protocols, protocol_count);
+ POSIX_GUARD_RESULT(s2n_protocol_preferences_set(&config->application_protocols, protocols, protocol_count));
+ return S2N_SUCCESS;
+}
+
+int s2n_config_append_protocol_preference(struct s2n_config *config, const uint8_t *protocol, uint8_t protocol_len)
+{
+ POSIX_GUARD_RESULT(s2n_protocol_preferences_append(&config->application_protocols, protocol, protocol_len));
+ return S2N_SUCCESS;
}
int s2n_connection_set_protocol_preferences(struct s2n_connection *conn, const char * const *protocols, int protocol_count)
{
- return s2n_blob_set_protocol_preferences(&conn->application_protocols_overridden, protocols, protocol_count);
+ POSIX_GUARD_RESULT(s2n_protocol_preferences_set(&conn->application_protocols_overridden, protocols, protocol_count));
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_append_protocol_preference(struct s2n_connection *conn, const uint8_t *protocol, uint8_t protocol_len)
+{
+ POSIX_GUARD_RESULT(s2n_protocol_preferences_append(&conn->application_protocols_overridden, protocol, protocol_len));
+ return S2N_SUCCESS;
}
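
For reference, a minimal usage sketch of the ALPN preference setters defined above. The protocol strings and the surrounding function are illustrative, and error handling is collapsed into the POSIX_GUARD style used elsewhere in this diff.

static int example_set_alpn(struct s2n_config *config, struct s2n_connection *conn)
{
    const char *protocols[] = { "h2", "http/1.1" };
    POSIX_GUARD(s2n_config_set_protocol_preferences(config, protocols, 2));

    /* Entries can also be appended one at a time, e.g. when the list is built dynamically. */
    const uint8_t h3[] = "h3";
    POSIX_GUARD(s2n_config_append_protocol_preference(config, h3, sizeof(h3) - 1));

    /* A per-connection override replaces the config-level list for this connection only. */
    POSIX_GUARD(s2n_connection_set_protocol_preferences(conn, protocols, 2));
    return S2N_SUCCESS;
}
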
diff --git a/contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.h b/contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.h
new file mode 100644
index 0000000000..421dfae6a7
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_protocol_preferences.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "api/s2n.h"
+
+#include "utils/s2n_result.h"
+
+S2N_RESULT s2n_protocol_preferences_read(struct s2n_stuffer *protocol_preferences, struct s2n_blob *protocol);
+S2N_RESULT s2n_protocol_preferences_contain(struct s2n_blob *protocol_preferences, struct s2n_blob *protocol, bool *contains);
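
A hypothetical call site for the two helpers declared above: walk a wire-format ALPN list and test each entry against a configured preference blob. The helper name and the client_protocols stuffer are assumptions; this is not the selection logic s2n itself uses, only an illustration of the API.

static S2N_RESULT example_find_supported(struct s2n_connection *conn,
        struct s2n_stuffer *client_protocols, struct s2n_blob *selected, bool *found)
{
    RESULT_ENSURE_REF(conn);
    RESULT_ENSURE_REF(found);
    *found = false;

    while (s2n_stuffer_data_available(client_protocols) > 0) {
        struct s2n_blob candidate = { 0 };
        RESULT_GUARD(s2n_protocol_preferences_read(client_protocols, &candidate));

        RESULT_GUARD(s2n_protocol_preferences_contain(&conn->config->application_protocols, &candidate, found));
        if (*found) {
            *selected = candidate;
            return S2N_RESULT_OK;
        }
    }
    return S2N_RESULT_OK;
}
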
diff --git a/contrib/restricted/aws/s2n/tls/s2n_psk.c b/contrib/restricted/aws/s2n/tls/s2n_psk.c
index 59d4d75158..4aacb13d87 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_psk.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_psk.c
@@ -13,11 +13,15 @@
* permissions and limitations under the License.
*/
+#include <sys/param.h>
+
#include "crypto/s2n_tls13_keys.h"
#include "tls/s2n_handshake.h"
#include "tls/s2n_tls13_handshake.h"
#include "tls/s2n_tls.h"
+#include "tls/extensions/s2n_extension_type.h"
+#include "tls/s2n_tls13_secrets.h"
#include "utils/s2n_array.h"
#include "utils/s2n_mem.h"
@@ -25,100 +29,373 @@
#define S2N_HASH_ALG_COUNT S2N_HASH_SENTINEL
-int s2n_psk_init(struct s2n_psk *psk, s2n_psk_type type)
+S2N_RESULT s2n_psk_init(struct s2n_psk *psk, s2n_psk_type type)
{
- notnull_check(psk);
+ RESULT_ENSURE_MUT(psk);
- memset_check(psk, 0, sizeof(struct s2n_psk));
+ RESULT_CHECKED_MEMSET(psk, 0, sizeof(struct s2n_psk));
psk->hmac_alg = S2N_HMAC_SHA256;
psk->type = type;
- return S2N_SUCCESS;
+ return S2N_RESULT_OK;
}
-int s2n_psk_new_identity(struct s2n_psk *psk, const uint8_t *identity, size_t identity_size)
+struct s2n_psk* s2n_external_psk_new()
{
- notnull_check(psk);
+ DEFER_CLEANUP(struct s2n_blob mem = { 0 }, s2n_free);
+ PTR_GUARD_POSIX(s2n_alloc(&mem, sizeof(struct s2n_psk)));
- GUARD(s2n_realloc(&psk->identity, identity_size));
- memcpy_check(psk->identity.data, identity, identity_size);
+ struct s2n_psk *psk = (struct s2n_psk*)(void*) mem.data;
+ PTR_GUARD_RESULT(s2n_psk_init(psk, S2N_PSK_TYPE_EXTERNAL));
+
+ ZERO_TO_DISABLE_DEFER_CLEANUP(mem);
+ return psk;
+}
+
+int s2n_psk_set_identity(struct s2n_psk *psk, const uint8_t *identity, uint16_t identity_size)
+{
+ POSIX_ENSURE_REF(psk);
+ POSIX_ENSURE_REF(identity);
+ POSIX_ENSURE(identity_size != 0, S2N_ERR_INVALID_ARGUMENT);
+
+ POSIX_GUARD(s2n_realloc(&psk->identity, identity_size));
+ POSIX_CHECKED_MEMCPY(psk->identity.data, identity, identity_size);
return S2N_SUCCESS;
}
-int s2n_psk_new_secret(struct s2n_psk *psk, const uint8_t *secret, size_t secret_size)
+int s2n_psk_set_secret(struct s2n_psk *psk, const uint8_t *secret, uint16_t secret_size)
{
- notnull_check(psk);
+ POSIX_ENSURE_REF(psk);
+ POSIX_ENSURE_REF(secret);
+ POSIX_ENSURE(secret_size != 0, S2N_ERR_INVALID_ARGUMENT);
- GUARD(s2n_realloc(&psk->secret, secret_size));
- memcpy_check(psk->secret.data, secret, secret_size);
+ POSIX_GUARD(s2n_realloc(&psk->secret, secret_size));
+ POSIX_CHECKED_MEMCPY(psk->secret.data, secret, secret_size);
return S2N_SUCCESS;
}
-int s2n_psk_free(struct s2n_psk *psk)
+S2N_RESULT s2n_psk_clone(struct s2n_psk *new_psk, struct s2n_psk *original_psk)
+{
+ if (original_psk == NULL) {
+ return S2N_RESULT_OK;
+ }
+ RESULT_ENSURE_REF(new_psk);
+
+ struct s2n_psk psk_copy = *new_psk;
+
+ /* Copy all fields from the original psk EXCEPT the blobs, which we need to reallocate. */
+ *new_psk = *original_psk;
+ new_psk->identity = psk_copy.identity;
+ new_psk->secret = psk_copy.secret;
+ new_psk->early_secret = psk_copy.early_secret;
+ new_psk->early_data_config = psk_copy.early_data_config;
+
+ /* Clone / realloc blobs */
+ RESULT_GUARD_POSIX(s2n_psk_set_identity(new_psk, original_psk->identity.data, original_psk->identity.size));
+ RESULT_GUARD_POSIX(s2n_psk_set_secret(new_psk, original_psk->secret.data, original_psk->secret.size));
+ RESULT_GUARD_POSIX(s2n_realloc(&new_psk->early_secret, original_psk->early_secret.size));
+ RESULT_CHECKED_MEMCPY(new_psk->early_secret.data, original_psk->early_secret.data, original_psk->early_secret.size);
+ RESULT_GUARD(s2n_early_data_config_clone(new_psk, &original_psk->early_data_config));
+
+ return S2N_RESULT_OK;
+}
+
+S2N_CLEANUP_RESULT s2n_psk_wipe(struct s2n_psk *psk)
{
if (psk == NULL) {
- return S2N_SUCCESS;
+ return S2N_RESULT_OK;
}
- GUARD(s2n_free(&psk->early_secret));
- GUARD(s2n_free(&psk->identity));
- GUARD(s2n_free(&psk->secret));
+ RESULT_GUARD_POSIX(s2n_free(&psk->early_secret));
+ RESULT_GUARD_POSIX(s2n_free(&psk->identity));
+ RESULT_GUARD_POSIX(s2n_free(&psk->secret));
+ RESULT_GUARD(s2n_early_data_config_free(&psk->early_data_config));
- return S2N_SUCCESS;
+ return S2N_RESULT_OK;
+}
+
+int s2n_psk_free(struct s2n_psk **psk)
+{
+ if (psk == NULL) {
+ return S2N_SUCCESS;
+ }
+ POSIX_GUARD_RESULT(s2n_psk_wipe(*psk));
+ return s2n_free_object((uint8_t **) psk, sizeof(struct s2n_psk));
}
S2N_RESULT s2n_psk_parameters_init(struct s2n_psk_parameters *params)
{
- ENSURE_REF(params);
- CHECKED_MEMSET(params, 0, sizeof(struct s2n_psk_parameters));
- GUARD_RESULT(s2n_array_init(&params->psk_list, sizeof(struct s2n_psk)));
+ RESULT_ENSURE_REF(params);
+ RESULT_CHECKED_MEMSET(params, 0, sizeof(struct s2n_psk_parameters));
+ RESULT_GUARD(s2n_array_init(&params->psk_list, sizeof(struct s2n_psk)));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_psk_offered_psk_size(struct s2n_psk *psk, uint32_t *size)
+{
+ *size = sizeof(uint16_t) /* identity size */
+ + sizeof(uint32_t) /* obfuscated ticket age */
+ + sizeof(uint8_t) /* binder size */;
+
+ RESULT_GUARD_POSIX(s2n_add_overflow(*size, psk->identity.size, size));
+
+ uint8_t binder_size = 0;
+ RESULT_GUARD_POSIX(s2n_hmac_digest_size(psk->hmac_alg, &binder_size));
+ RESULT_GUARD_POSIX(s2n_add_overflow(*size, binder_size, size));
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_psk_parameters_offered_psks_size(struct s2n_psk_parameters *params, uint32_t *size)
+{
+ RESULT_ENSURE_REF(params);
+ RESULT_ENSURE_REF(size);
+
+ *size = sizeof(uint16_t) /* identity list size */
+ + sizeof(uint16_t) /* binder list size */;
+
+ for (uint32_t i = 0; i < params->psk_list.len; i++) {
+ struct s2n_psk *psk = NULL;
+ RESULT_GUARD(s2n_array_get(&params->psk_list, i, (void**)&psk));
+ RESULT_ENSURE_REF(psk);
+
+ uint32_t psk_size = 0;
+ RESULT_GUARD(s2n_psk_offered_psk_size(psk, &psk_size));
+ RESULT_GUARD_POSIX(s2n_add_overflow(*size, psk_size, size));
+ }
return S2N_RESULT_OK;
}
S2N_CLEANUP_RESULT s2n_psk_parameters_wipe(struct s2n_psk_parameters *params)
{
- ENSURE_REF(params);
+ RESULT_ENSURE_REF(params);
for (size_t i = 0; i < params->psk_list.len; i++) {
- struct s2n_psk *psk;
- GUARD_RESULT(s2n_array_get(&params->psk_list, i, (void**)&psk));
- GUARD_AS_RESULT(s2n_psk_free(psk));
+ struct s2n_psk *psk = NULL;
+ RESULT_GUARD(s2n_array_get(&params->psk_list, i, (void**)&psk));
+ RESULT_GUARD(s2n_psk_wipe(psk));
+ }
+ RESULT_GUARD_POSIX(s2n_free(&params->psk_list.mem));
+ RESULT_GUARD(s2n_psk_parameters_init(params));
+
+ return S2N_RESULT_OK;
+}
+
+S2N_CLEANUP_RESULT s2n_psk_parameters_wipe_secrets(struct s2n_psk_parameters *params)
+{
+ RESULT_ENSURE_REF(params);
+
+ for (size_t i = 0; i < params->psk_list.len; i++) {
+ struct s2n_psk *psk = NULL;
+ RESULT_GUARD(s2n_array_get(&params->psk_list, i, (void**)&psk));
+ RESULT_ENSURE_REF(psk);
+ RESULT_GUARD_POSIX(s2n_free(&psk->early_secret));
+ RESULT_GUARD_POSIX(s2n_free(&psk->secret));
+ }
+
+ return S2N_RESULT_OK;
+}
+
+bool s2n_offered_psk_list_has_next(struct s2n_offered_psk_list *psk_list)
+{
+ return psk_list != NULL && s2n_stuffer_data_available(&psk_list->wire_data) > 0;
+}
+
+S2N_RESULT s2n_offered_psk_list_read_next(struct s2n_offered_psk_list *psk_list, struct s2n_offered_psk *psk)
+{
+ RESULT_ENSURE_REF(psk_list);
+ RESULT_ENSURE_REF(psk_list->conn);
+ RESULT_ENSURE_MUT(psk);
+
+ uint16_t identity_size = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint16(&psk_list->wire_data, &identity_size));
+ RESULT_ENSURE_GT(identity_size, 0);
+
+ uint8_t *identity_data = NULL;
+ identity_data = s2n_stuffer_raw_read(&psk_list->wire_data, identity_size);
+ RESULT_ENSURE_REF(identity_data);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.11
+ *# For identities established externally, an obfuscated_ticket_age of 0 SHOULD be
+ *# used, and servers MUST ignore the value.
+ */
+ if (psk_list->conn->psk_params.type == S2N_PSK_TYPE_EXTERNAL) {
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_read(&psk_list->wire_data, sizeof(uint32_t)));
+ } else {
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint32(&psk_list->wire_data, &psk->obfuscated_ticket_age));
+ }
+
+ RESULT_GUARD_POSIX(s2n_blob_init(&psk->identity, identity_data, identity_size));
+ psk->wire_index = psk_list->wire_index;
+
+ RESULT_ENSURE(psk_list->wire_index < UINT16_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ psk_list->wire_index++;
+ return S2N_RESULT_OK;
+}
+
+int s2n_offered_psk_list_next(struct s2n_offered_psk_list *psk_list, struct s2n_offered_psk *psk)
+{
+ POSIX_ENSURE_REF(psk_list);
+ POSIX_ENSURE_REF(psk);
+ *psk = (struct s2n_offered_psk){ 0 };
+ POSIX_ENSURE(s2n_offered_psk_list_has_next(psk_list), S2N_ERR_STUFFER_OUT_OF_DATA);
+ POSIX_ENSURE(s2n_result_is_ok(s2n_offered_psk_list_read_next(psk_list, psk)), S2N_ERR_BAD_MESSAGE);
+ return S2N_SUCCESS;
+}
+
+int s2n_offered_psk_list_reread(struct s2n_offered_psk_list *psk_list)
+{
+ POSIX_ENSURE_REF(psk_list);
+ psk_list->wire_index = 0;
+ return s2n_stuffer_reread(&psk_list->wire_data);
+}
+
+/* Match a PSK identity received from the client against the server's known PSK identities.
+ * This method compares a single client identity to all server identities.
+ *
+ * While both the client's offered identities and whether a match was found are public, we should make an attempt
+ * to keep the server's known identities a secret. We will make comparisons to the server's identities constant
+ * time (to hide partial matches) and not end the search early when a match is found (to hide the ordering).
+ *
+ * Keeping these comparisons constant time is not high priority. There's no known attack using these timings,
+ * and an attacker could probably guess the server's known identities just by observing the public identities
+ * sent by clients.
+ */
+static S2N_RESULT s2n_match_psk_identity(struct s2n_array *known_psks, const struct s2n_blob *wire_identity,
+ struct s2n_psk **match)
+{
+ RESULT_ENSURE_REF(match);
+ RESULT_ENSURE_REF(wire_identity);
+ RESULT_ENSURE_REF(known_psks);
+ *match = NULL;
+ for (size_t i = 0; i < known_psks->len; i++) {
+ struct s2n_psk *psk = NULL;
+ RESULT_GUARD(s2n_array_get(known_psks, i, (void**)&psk));
+ RESULT_ENSURE_REF(psk);
+ RESULT_ENSURE_REF(psk->identity.data);
+ RESULT_ENSURE_REF(wire_identity->data);
+ uint32_t compare_size = MIN(wire_identity->size, psk->identity.size);
+ if (s2n_constant_time_equals(psk->identity.data, wire_identity->data, compare_size)
+ & (psk->identity.size == wire_identity->size) & (!*match)) {
+ *match = psk;
+ }
+ }
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.2.10
+ *# For PSKs provisioned via NewSessionTicket, a server MUST validate
+ *# that the ticket age for the selected PSK identity (computed by
+ *# subtracting ticket_age_add from PskIdentity.obfuscated_ticket_age
+ *# modulo 2^32) is within a small tolerance of the time since the ticket
+ *# was issued (see Section 8).
+ **/
+static S2N_RESULT s2n_validate_ticket_lifetime(struct s2n_connection *conn, uint32_t obfuscated_ticket_age, uint32_t ticket_age_add)
+{
+ RESULT_ENSURE_REF(conn);
+
+ if (conn->psk_params.type == S2N_PSK_TYPE_EXTERNAL) {
+ return S2N_RESULT_OK;
}
- GUARD_AS_RESULT(s2n_free(&params->psk_list.mem));
- GUARD_RESULT(s2n_psk_parameters_init(params));
+
+ /* Subtract the ticket_age_add value from the ticket age in milliseconds. The resulting uint32_t value
+ * may wrap, which provides the modulo 2^32 operation. */
+ uint32_t ticket_age_in_millis = obfuscated_ticket_age - ticket_age_add;
+ uint32_t session_lifetime_in_millis = conn->config->session_state_lifetime_in_nanos / ONE_MILLISEC_IN_NANOS;
+ RESULT_ENSURE(ticket_age_in_millis < session_lifetime_in_millis, S2N_ERR_INVALID_SESSION_TICKET);
return S2N_RESULT_OK;
}
+int s2n_offered_psk_list_choose_psk(struct s2n_offered_psk_list *psk_list, struct s2n_offered_psk *psk)
+{
+ POSIX_ENSURE_REF(psk_list);
+ POSIX_ENSURE_REF(psk_list->conn);
+
+ struct s2n_psk_parameters *psk_params = &psk_list->conn->psk_params;
+ struct s2n_stuffer ticket_stuffer = { 0 };
+
+ if (!psk) {
+ psk_params->chosen_psk = NULL;
+ return S2N_SUCCESS;
+ }
+
+ if (psk_params->type == S2N_PSK_TYPE_RESUMPTION && psk_list->conn->config->use_tickets) {
+ POSIX_GUARD(s2n_stuffer_init(&ticket_stuffer, &psk->identity));
+ POSIX_GUARD(s2n_stuffer_skip_write(&ticket_stuffer, psk->identity.size));
+
+ /* s2n_decrypt_session_ticket appends a new PSK with the decrypted values. */
+ POSIX_GUARD(s2n_decrypt_session_ticket(psk_list->conn, &ticket_stuffer));
+ }
+
+ struct s2n_psk *chosen_psk = NULL;
+ POSIX_GUARD_RESULT(s2n_match_psk_identity(&psk_params->psk_list, &psk->identity, &chosen_psk));
+ POSIX_ENSURE_REF(chosen_psk);
+ POSIX_GUARD_RESULT(s2n_validate_ticket_lifetime(psk_list->conn, psk->obfuscated_ticket_age, chosen_psk->ticket_age_add));
+ psk_params->chosen_psk = chosen_psk;
+ psk_params->chosen_psk_wire_index = psk->wire_index;
+
+ return S2N_SUCCESS;
+}
+
+struct s2n_offered_psk* s2n_offered_psk_new()
+{
+ DEFER_CLEANUP(struct s2n_blob mem = { 0 }, s2n_free);
+ PTR_GUARD_POSIX(s2n_alloc(&mem, sizeof(struct s2n_offered_psk)));
+ PTR_GUARD_POSIX(s2n_blob_zero(&mem));
+
+ struct s2n_offered_psk *psk = (struct s2n_offered_psk*)(void*) mem.data;
+
+ ZERO_TO_DISABLE_DEFER_CLEANUP(mem);
+ return psk;
+}
+
+int s2n_offered_psk_free(struct s2n_offered_psk **psk)
+{
+ if (psk == NULL) {
+ return S2N_SUCCESS;
+ }
+ return s2n_free_object((uint8_t **) psk, sizeof(struct s2n_offered_psk));
+}
+
+int s2n_offered_psk_get_identity(struct s2n_offered_psk *psk, uint8_t** identity, uint16_t *size)
+{
+ POSIX_ENSURE_REF(psk);
+ POSIX_ENSURE_REF(identity);
+ POSIX_ENSURE_REF(size);
+ *identity = psk->identity.data;
+ *size = psk->identity.size;
+ return S2N_SUCCESS;
+}
+
/* The binder hash is computed by hashing the concatenation of the current transcript
* and a partial ClientHello that does not include the binders themselves.
*/
int s2n_psk_calculate_binder_hash(struct s2n_connection *conn, s2n_hmac_algorithm hmac_alg,
const struct s2n_blob *partial_client_hello, struct s2n_blob *output_binder_hash)
{
- notnull_check(partial_client_hello);
- notnull_check(output_binder_hash);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(partial_client_hello);
+ POSIX_ENSURE_REF(output_binder_hash);
+ struct s2n_handshake_hashes *hashes = conn->handshake.hashes;
+ POSIX_ENSURE_REF(hashes);
/* Retrieve the current transcript.
* The current transcript will be empty unless this handshake included a HelloRetryRequest. */
- struct s2n_hash_state current_hash_state = {0};
-
- s2n_hash_algorithm hash_alg;
- GUARD(s2n_hmac_hash_alg(hmac_alg, &hash_alg));
- GUARD(s2n_handshake_get_hash_state(conn, hash_alg, &current_hash_state));
-
- /* Copy the current transcript to avoid modifying the original. */
- DEFER_CLEANUP(struct s2n_hash_state hash_copy, s2n_hash_free);
- GUARD(s2n_hash_new(&hash_copy));
- GUARD(s2n_hash_copy(&hash_copy, &current_hash_state));
+ s2n_hash_algorithm hash_alg = S2N_HASH_NONE;
+ struct s2n_hash_state *hash_state = &hashes->hash_workspace;
+ POSIX_GUARD(s2n_hmac_hash_alg(hmac_alg, &hash_alg));
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, hash_alg, hash_state));
/* Add the partial client hello to the transcript. */
- GUARD(s2n_hash_update(&hash_copy, partial_client_hello->data, partial_client_hello->size));
+ POSIX_GUARD(s2n_hash_update(hash_state, partial_client_hello->data, partial_client_hello->size));
/* Get the transcript digest */
- GUARD(s2n_hash_digest(&hash_copy, output_binder_hash->data, output_binder_hash->size));
+ POSIX_GUARD(s2n_hash_digest(hash_state, output_binder_hash->data, output_binder_hash->size));
return S2N_SUCCESS;
}
@@ -131,29 +408,26 @@ int s2n_psk_calculate_binder_hash(struct s2n_connection *conn, s2n_hmac_algorith
int s2n_psk_calculate_binder(struct s2n_psk *psk, const struct s2n_blob *binder_hash,
struct s2n_blob *output_binder)
{
- notnull_check(psk);
- notnull_check(binder_hash);
- notnull_check(output_binder);
+ POSIX_ENSURE_REF(psk);
+ POSIX_ENSURE_REF(binder_hash);
+ POSIX_ENSURE_REF(output_binder);
DEFER_CLEANUP(struct s2n_tls13_keys psk_keys, s2n_tls13_keys_free);
- GUARD(s2n_tls13_keys_init(&psk_keys, psk->hmac_alg));
- eq_check(binder_hash->size, psk_keys.size);
- eq_check(output_binder->size, psk_keys.size);
-
- /* Make sure the early secret is saved on the psk structure for later use */
- GUARD(s2n_realloc(&psk->early_secret, psk_keys.size));
- GUARD(s2n_blob_init(&psk_keys.extract_secret, psk->early_secret.data, psk_keys.size));
+ POSIX_GUARD(s2n_tls13_keys_init(&psk_keys, psk->hmac_alg));
+ POSIX_ENSURE_EQ(binder_hash->size, psk_keys.size);
+ POSIX_ENSURE_EQ(output_binder->size, psk_keys.size);
/* Derive the binder key */
- GUARD(s2n_tls13_derive_binder_key(&psk_keys, psk));
+ POSIX_GUARD_RESULT(s2n_derive_binder_key(psk, &psk_keys.derive_secret));
+ POSIX_GUARD(s2n_blob_init(&psk_keys.extract_secret, psk->early_secret.data, psk_keys.size));
struct s2n_blob *binder_key = &psk_keys.derive_secret;
/* Expand the binder key into the finished key */
s2n_tls13_key_blob(finished_key, psk_keys.size);
- GUARD(s2n_tls13_derive_finished_key(&psk_keys, binder_key, &finished_key));
+ POSIX_GUARD(s2n_tls13_derive_finished_key(&psk_keys, binder_key, &finished_key));
/* HMAC the binder hash with the binder finished key */
- GUARD(s2n_hkdf_extract(&psk_keys.hmac, psk_keys.hmac_algorithm, &finished_key, binder_hash, output_binder));
+ POSIX_GUARD(s2n_hkdf_extract(&psk_keys.hmac, psk_keys.hmac_algorithm, &finished_key, binder_hash, output_binder));
return S2N_SUCCESS;
}
@@ -161,24 +435,24 @@ int s2n_psk_calculate_binder(struct s2n_psk *psk, const struct s2n_blob *binder_
int s2n_psk_verify_binder(struct s2n_connection *conn, struct s2n_psk *psk,
const struct s2n_blob *partial_client_hello, struct s2n_blob *binder_to_verify)
{
- notnull_check(psk);
- notnull_check(binder_to_verify);
+ POSIX_ENSURE_REF(psk);
+ POSIX_ENSURE_REF(binder_to_verify);
DEFER_CLEANUP(struct s2n_tls13_keys psk_keys, s2n_tls13_keys_free);
- GUARD(s2n_tls13_keys_init(&psk_keys, psk->hmac_alg));
- eq_check(binder_to_verify->size, psk_keys.size);
+ POSIX_GUARD(s2n_tls13_keys_init(&psk_keys, psk->hmac_alg));
+ POSIX_ENSURE_EQ(binder_to_verify->size, psk_keys.size);
/* Calculate the binder hash from the transcript */
s2n_tls13_key_blob(binder_hash, psk_keys.size);
- GUARD(s2n_psk_calculate_binder_hash(conn, psk->hmac_alg, partial_client_hello, &binder_hash));
+ POSIX_GUARD(s2n_psk_calculate_binder_hash(conn, psk->hmac_alg, partial_client_hello, &binder_hash));
/* Calculate the expected binder from the binder hash */
s2n_tls13_key_blob(expected_binder, psk_keys.size);
- GUARD(s2n_psk_calculate_binder(psk, &binder_hash, &expected_binder));
+ POSIX_GUARD(s2n_psk_calculate_binder(psk, &binder_hash, &expected_binder));
/* Verify the expected binder matches the given binder.
* This operation must be constant time. */
- GUARD(s2n_tls13_mac_verify(&psk_keys, &expected_binder, binder_to_verify));
+ POSIX_GUARD(s2n_tls13_mac_verify(&psk_keys, &expected_binder, binder_to_verify));
return S2N_SUCCESS;
}
@@ -186,15 +460,15 @@ int s2n_psk_verify_binder(struct s2n_connection *conn, struct s2n_psk *psk,
static S2N_RESULT s2n_psk_write_binder(struct s2n_connection *conn, struct s2n_psk *psk,
const struct s2n_blob *binder_hash, struct s2n_stuffer *out)
{
- ENSURE_REF(binder_hash);
+ RESULT_ENSURE_REF(binder_hash);
struct s2n_blob binder;
uint8_t binder_data[S2N_TLS13_SECRET_MAX_LEN] = { 0 };
- GUARD_AS_RESULT(s2n_blob_init(&binder, binder_data, binder_hash->size));
+ RESULT_GUARD_POSIX(s2n_blob_init(&binder, binder_data, binder_hash->size));
- GUARD_AS_RESULT(s2n_psk_calculate_binder(psk, binder_hash, &binder));
- GUARD_AS_RESULT(s2n_stuffer_write_uint8(out, binder.size));
- GUARD_AS_RESULT(s2n_stuffer_write(out, &binder));
+ RESULT_GUARD_POSIX(s2n_psk_calculate_binder(psk, binder_hash, &binder));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(out, binder.size));
+ RESULT_GUARD_POSIX(s2n_stuffer_write(out, &binder));
return S2N_RESULT_OK;
}
@@ -202,8 +476,8 @@ static S2N_RESULT s2n_psk_write_binder(struct s2n_connection *conn, struct s2n_p
static S2N_RESULT s2n_psk_write_binder_list(struct s2n_connection *conn, const struct s2n_blob *partial_client_hello,
struct s2n_stuffer *out)
{
- ENSURE_REF(conn);
- ENSURE_REF(partial_client_hello);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(partial_client_hello);
struct s2n_psk_parameters *psk_params = &conn->psk_params;
struct s2n_array *psk_list = &psk_params->psk_list;
@@ -214,13 +488,13 @@ static S2N_RESULT s2n_psk_write_binder_list(struct s2n_connection *conn, const s
struct s2n_blob binder_hashes[S2N_HASH_ALG_COUNT] = { 0 };
struct s2n_stuffer_reservation binder_list_size = { 0 };
- GUARD_AS_RESULT(s2n_stuffer_reserve_uint16(out, &binder_list_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_reserve_uint16(out, &binder_list_size));
/* Write binder for every psk */
for (size_t i = 0; i < psk_list->len; i++) {
struct s2n_psk *psk = NULL;
- GUARD_RESULT(s2n_array_get(psk_list, i, (void**) &psk));
- ENSURE_REF(psk);
+ RESULT_GUARD(s2n_array_get(psk_list, i, (void**) &psk));
+ RESULT_ENSURE_REF(psk);
/**
*= https://tools.ietf.org/rfc/rfc8446#section-4.1.4
@@ -238,21 +512,21 @@ static S2N_RESULT s2n_psk_write_binder_list(struct s2n_connection *conn, const s
struct s2n_blob *binder_hash = &binder_hashes[psk->hmac_alg];
if (binder_hash->size == 0) {
uint8_t hash_size = 0;
- GUARD_AS_RESULT(s2n_hmac_digest_size(psk->hmac_alg, &hash_size));
- GUARD_AS_RESULT(s2n_blob_init(binder_hash, binder_hashes_data[psk->hmac_alg], hash_size));
- GUARD_AS_RESULT(s2n_psk_calculate_binder_hash(conn, psk->hmac_alg, partial_client_hello, binder_hash));
+ RESULT_GUARD_POSIX(s2n_hmac_digest_size(psk->hmac_alg, &hash_size));
+ RESULT_GUARD_POSIX(s2n_blob_init(binder_hash, binder_hashes_data[psk->hmac_alg], hash_size));
+ RESULT_GUARD_POSIX(s2n_psk_calculate_binder_hash(conn, psk->hmac_alg, partial_client_hello, binder_hash));
}
- GUARD_RESULT(s2n_psk_write_binder(conn, psk, binder_hash, out));
+ RESULT_GUARD(s2n_psk_write_binder(conn, psk, binder_hash, out));
}
- GUARD_AS_RESULT(s2n_stuffer_write_vector_size(&binder_list_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_vector_size(&binder_list_size));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_finish_psk_extension(struct s2n_connection *conn)
{
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn);
if (!conn->psk_params.binder_list_size) {
return S2N_RESULT_OK;
@@ -262,72 +536,161 @@ S2N_RESULT s2n_finish_psk_extension(struct s2n_connection *conn)
struct s2n_psk_parameters *psk_params = &conn->psk_params;
/* Fill in the correct message size. */
- GUARD_AS_RESULT(s2n_handshake_finish_header(client_hello));
+ RESULT_GUARD_POSIX(s2n_handshake_finish_header(client_hello));
/* Remove the empty space allocated for the binder list.
* It was originally added to ensure the extension / extension list / message sizes
* were properly calculated. */
- GUARD_AS_RESULT(s2n_stuffer_wipe_n(client_hello, psk_params->binder_list_size));
+ RESULT_GUARD_POSIX(s2n_stuffer_wipe_n(client_hello, psk_params->binder_list_size));
/* Store the partial client hello for use in calculating the binder hash. */
struct s2n_blob partial_client_hello = { 0 };
- GUARD_AS_RESULT(s2n_blob_init(&partial_client_hello, client_hello->blob.data,
+ RESULT_GUARD_POSIX(s2n_blob_init(&partial_client_hello, client_hello->blob.data,
s2n_stuffer_data_available(client_hello)));
- GUARD_RESULT(s2n_psk_write_binder_list(conn, &partial_client_hello, client_hello));
+ RESULT_GUARD(s2n_psk_write_binder_list(conn, &partial_client_hello, client_hello));
return S2N_RESULT_OK;
}
-static S2N_RESULT s2n_psk_set_hmac(struct s2n_psk *psk, s2n_psk_hmac psk_hmac_alg)
+int s2n_psk_set_hmac(struct s2n_psk *psk, s2n_psk_hmac hmac)
{
- switch(psk_hmac_alg) {
- case S2N_PSK_HMAC_SHA224: psk->hmac_alg = S2N_HMAC_SHA224; break;
+ POSIX_ENSURE_REF(psk);
+ switch(hmac) {
case S2N_PSK_HMAC_SHA256: psk->hmac_alg = S2N_HMAC_SHA256; break;
case S2N_PSK_HMAC_SHA384: psk->hmac_alg = S2N_HMAC_SHA384; break;
default:
- BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM);
+ POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM);
+ }
+ return S2N_SUCCESS;
+}
+
+S2N_RESULT s2n_connection_set_psk_type(struct s2n_connection *conn, s2n_psk_type type)
+{
+ RESULT_ENSURE_REF(conn);
+ if (conn->psk_params.psk_list.len != 0) {
+ RESULT_ENSURE(conn->psk_params.type == type, S2N_ERR_PSK_MODE);
}
+ conn->psk_params.type = type;
return S2N_RESULT_OK;
}
-int s2n_connection_set_external_psks(struct s2n_connection *conn, struct s2n_external_psk *psk_vec, size_t psk_vec_length)
+int s2n_connection_append_psk(struct s2n_connection *conn, struct s2n_psk *input_psk)
{
- ENSURE_POSIX_REF(conn);
- ENSURE_POSIX_REF(psk_vec);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(input_psk);
+ POSIX_GUARD_RESULT(s2n_connection_set_psk_type(conn, input_psk->type));
+
+ struct s2n_array *psk_list = &conn->psk_params.psk_list;
- /* Remove all previously-set external psks */
- /* The loop iterates from len to 1 instead of from len-1 to 0 to avoid size_t underflowing */
- for (size_t i = conn->psk_params.psk_list.len; i > 0; i--) {
- size_t i_index = i - 1;
- struct s2n_psk *psk = NULL;
- GUARD_AS_POSIX(s2n_array_get(&conn->psk_params.psk_list, i_index, (void**) &psk));
- ENSURE_POSIX_REF(psk);
- if (psk->type == S2N_PSK_TYPE_EXTERNAL) {
- GUARD(s2n_psk_free(psk));
- GUARD_AS_POSIX(s2n_array_remove(&conn->psk_params.psk_list, i_index));
- }
+ /* Check for duplicate identities */
+ for (uint32_t j = 0; j < psk_list->len; j++) {
+ struct s2n_psk *existing_psk = NULL;
+ POSIX_GUARD_RESULT(s2n_array_get(psk_list, j, (void**) &existing_psk));
+ POSIX_ENSURE_REF(existing_psk);
+
+ bool duplicate = existing_psk->identity.size == input_psk->identity.size
+ && memcmp(existing_psk->identity.data, input_psk->identity.data, existing_psk->identity.size) == 0;
+ POSIX_ENSURE(!duplicate, S2N_ERR_DUPLICATE_PSK_IDENTITIES);
}
- for (size_t i = 0; i < psk_vec_length; i++) {
- /* Check for duplicate identities */
- size_t array_len = conn->psk_params.psk_list.len;
- for (size_t j = 0; j < array_len; j++) {
- struct s2n_psk *psk = NULL;
- GUARD_AS_POSIX(s2n_array_get(&conn->psk_params.psk_list, j, (void**) &psk));
- ENSURE_POSIX_REF(psk);
- if (psk->identity.size == psk_vec[i].identity_length) {
- ENSURE_POSIX(memcmp(psk->identity.data, psk_vec[i].identity, psk->identity.size) != 0, S2N_ERR_DUPLICATE_PSK_IDENTITIES);
- }
- }
+ /* Verify the PSK list will fit in the ClientHello pre_shared_key extension */
+ if (conn->mode == S2N_CLIENT) {
+ uint32_t list_size = 0;
+ POSIX_GUARD_RESULT(s2n_psk_parameters_offered_psks_size(&conn->psk_params, &list_size));
+
+ uint32_t psk_size = 0;
+ POSIX_GUARD_RESULT(s2n_psk_offered_psk_size(input_psk, &psk_size));
+
+ POSIX_ENSURE(list_size + psk_size + S2N_EXTENSION_HEADER_LENGTH <= UINT16_MAX, S2N_ERR_OFFERED_PSKS_TOO_LONG);
+ }
+
+ DEFER_CLEANUP(struct s2n_psk new_psk = { 0 }, s2n_psk_wipe);
+ POSIX_ENSURE(s2n_result_is_ok(s2n_psk_clone(&new_psk, input_psk)), S2N_ERR_INVALID_ARGUMENT);
+ POSIX_GUARD_RESULT(s2n_array_insert_and_copy(psk_list, psk_list->len, &new_psk));
+
+ ZERO_TO_DISABLE_DEFER_CLEANUP(new_psk);
+ return S2N_SUCCESS;
+}
+
+int s2n_config_set_psk_mode(struct s2n_config *config, s2n_psk_mode mode)
+{
+ POSIX_ENSURE_REF(config);
+ config->psk_mode = mode;
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_set_psk_mode(struct s2n_connection *conn, s2n_psk_mode mode)
+{
+ POSIX_ENSURE_REF(conn);
+ s2n_psk_type type = 0;
+ switch(mode) {
+ case S2N_PSK_MODE_RESUMPTION:
+ type = S2N_PSK_TYPE_RESUMPTION;
+ break;
+ case S2N_PSK_MODE_EXTERNAL:
+ type = S2N_PSK_TYPE_EXTERNAL;
+ break;
+ default:
+ POSIX_BAIL(S2N_ERR_INVALID_ARGUMENT);
+ break;
+ }
+ POSIX_GUARD_RESULT(s2n_connection_set_psk_type(conn, type));
+ conn->psk_mode_overridden = true;
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_get_negotiated_psk_identity_length(struct s2n_connection *conn, uint16_t *identity_length)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(identity_length);
- struct s2n_psk *new_psk = NULL;
- GUARD_AS_POSIX(s2n_array_pushback(&conn->psk_params.psk_list, (void**) &new_psk));
- ENSURE_POSIX_REF(new_psk);
- GUARD(s2n_psk_init(new_psk, S2N_PSK_TYPE_EXTERNAL));
- GUARD(s2n_psk_new_identity(new_psk, psk_vec[i].identity, psk_vec[i].identity_length));
- GUARD(s2n_psk_new_secret(new_psk, psk_vec[i].secret, psk_vec[i].secret_length));
- GUARD_AS_POSIX(s2n_psk_set_hmac(new_psk, psk_vec[i].hmac));
+ struct s2n_psk *chosen_psk = conn->psk_params.chosen_psk;
+
+ if (chosen_psk == NULL) {
+ *identity_length = 0;
+ } else {
+ *identity_length = chosen_psk->identity.size;
}
return S2N_SUCCESS;
}
+
+int s2n_connection_get_negotiated_psk_identity(struct s2n_connection *conn, uint8_t *identity,
+ uint16_t max_identity_length)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(identity);
+
+ struct s2n_psk *chosen_psk = conn->psk_params.chosen_psk;
+
+ if (chosen_psk == NULL) {
+ return S2N_SUCCESS;
+ }
+
+ POSIX_ENSURE(chosen_psk->identity.size <= max_identity_length, S2N_ERR_INSUFFICIENT_MEM_SIZE);
+ POSIX_CHECKED_MEMCPY(identity, chosen_psk->identity.data, chosen_psk->identity.size);
+
+ return S2N_SUCCESS;
+}
+
+S2N_RESULT s2n_psk_validate_keying_material(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+
+ struct s2n_psk *chosen_psk = conn->psk_params.chosen_psk;
+ if (!chosen_psk || chosen_psk->type != S2N_PSK_TYPE_RESUMPTION) {
+ return S2N_RESULT_OK;
+ }
+
+ /*
+ * The minimum ticket lifetime is 1s, because ticket_lifetime is given
+ * in seconds and 0 indicates that the ticket should be immediately discarded.
+ */
+ uint32_t min_lifetime = ONE_SEC_IN_NANOS;
+
+ uint64_t current_time = 0;
+ RESULT_GUARD_POSIX(conn->config->wall_clock(conn->config->sys_clock_ctx, &current_time));
+ RESULT_ENSURE(chosen_psk->keying_material_expiration > current_time + min_lifetime, S2N_ERR_KEYING_MATERIAL_EXPIRED);
+
+ return S2N_RESULT_OK;
+}
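
A minimal external-PSK setup sketch for the client-facing API introduced above. The identity string, secret parameters, and helper name are illustrative; only the s2n_psk_* and s2n_connection_append_psk calls come from this diff, and error handling (including cleanup on failure paths) is collapsed into POSIX_GUARD.

static int example_append_external_psk(struct s2n_connection *conn,
        const uint8_t *secret_bytes, uint16_t secret_len)
{
    struct s2n_psk *psk = s2n_external_psk_new();
    POSIX_ENSURE_REF(psk);

    POSIX_GUARD(s2n_psk_set_identity(psk, (const uint8_t *) "client-1", 8));
    POSIX_GUARD(s2n_psk_set_secret(psk, secret_bytes, secret_len));
    POSIX_GUARD(s2n_psk_set_hmac(psk, S2N_PSK_HMAC_SHA256));

    /* s2n_connection_append_psk() clones the PSK into the connection's list,
     * so the caller's copy should be freed afterwards. */
    POSIX_GUARD(s2n_connection_append_psk(conn, psk));
    POSIX_GUARD(s2n_psk_free(&psk));
    return S2N_SUCCESS;
}
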
diff --git a/contrib/restricted/aws/s2n/tls/s2n_psk.h b/contrib/restricted/aws/s2n/tls/s2n_psk.h
index 6d7052c876..38ac65f3ac 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_psk.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_psk.h
@@ -15,15 +15,17 @@
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include "crypto/s2n_hmac.h"
+#include "stuffer/s2n_stuffer.h"
+#include "tls/s2n_early_data.h"
#include "utils/s2n_array.h"
#include "utils/s2n_blob.h"
#include "utils/s2n_result.h"
typedef enum {
- S2N_PSK_TYPE_RESUMPTION,
+ S2N_PSK_TYPE_RESUMPTION = 0,
S2N_PSK_TYPE_EXTERNAL,
} s2n_psk_type;
@@ -33,52 +35,50 @@ typedef enum {
S2N_PSK_DHE_KE,
} s2n_psk_key_exchange_mode;
-typedef enum {
- S2N_PSK_HMAC_SHA224 = 0,
- S2N_PSK_HMAC_SHA256,
- S2N_PSK_HMAC_SHA384,
-} s2n_psk_hmac;
-
-struct s2n_external_psk {
- uint8_t *identity;
- size_t identity_length;
- uint8_t *secret;
- size_t secret_length;
- s2n_psk_hmac hmac;
-};
-
struct s2n_psk {
s2n_psk_type type;
struct s2n_blob identity;
struct s2n_blob secret;
s2n_hmac_algorithm hmac_alg;
- uint32_t obfuscated_ticket_age;
+ uint32_t ticket_age_add;
+ uint64_t ticket_issue_time;
struct s2n_blob early_secret;
-};
+ struct s2n_early_data_config early_data_config;
-struct s2n_psk_identity {
- uint8_t *data;
- uint16_t length;
+ /* This field is used with session tickets to track the lifetime
+ * of the original full handshake across multiple tickets.
+ * See https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ */
+ uint64_t keying_material_expiration;
};
+S2N_RESULT s2n_psk_init(struct s2n_psk *psk, s2n_psk_type type);
+S2N_CLEANUP_RESULT s2n_psk_wipe(struct s2n_psk *psk);
+S2N_RESULT s2n_psk_clone(struct s2n_psk *new_psk, struct s2n_psk *original_psk);
struct s2n_psk_parameters {
+ s2n_psk_type type;
struct s2n_array psk_list;
uint16_t binder_list_size;
uint16_t chosen_psk_wire_index;
struct s2n_psk *chosen_psk;
s2n_psk_key_exchange_mode psk_ke_mode;
};
-
-/* This function will be labeled S2N_API and become a publicly visible api once we release the psk API. */
-int s2n_connection_set_external_psks(struct s2n_connection *conn, struct s2n_external_psk *psk_vec, size_t psk_vec_length);
-
-int s2n_psk_init(struct s2n_psk *psk, s2n_psk_type type);
-int s2n_psk_new_identity(struct s2n_psk *psk, const uint8_t *identity, size_t identity_size);
-int s2n_psk_new_secret(struct s2n_psk *psk, const uint8_t *secret, size_t secret_size);
-int s2n_psk_free(struct s2n_psk *psk);
-
S2N_RESULT s2n_psk_parameters_init(struct s2n_psk_parameters *params);
+S2N_RESULT s2n_psk_parameters_offered_psks_size(struct s2n_psk_parameters *params, uint32_t *size);
S2N_CLEANUP_RESULT s2n_psk_parameters_wipe(struct s2n_psk_parameters *params);
+S2N_CLEANUP_RESULT s2n_psk_parameters_wipe_secrets(struct s2n_psk_parameters *params);
+
+struct s2n_offered_psk {
+ struct s2n_blob identity;
+ uint16_t wire_index;
+ uint32_t obfuscated_ticket_age;
+};
+
+struct s2n_offered_psk_list {
+ struct s2n_connection *conn;
+ struct s2n_stuffer wire_data;
+ uint16_t wire_index;
+};
S2N_RESULT s2n_finish_psk_extension(struct s2n_connection *conn);
@@ -89,9 +89,5 @@ int s2n_psk_calculate_binder(struct s2n_psk *psk, const struct s2n_blob *binder_
int s2n_psk_verify_binder(struct s2n_connection *conn, struct s2n_psk *psk,
const struct s2n_blob *partial_client_hello, struct s2n_blob *binder_to_verify);
-typedef int (*s2n_psk_selection_callback)(struct s2n_connection *conn,
- struct s2n_psk_identity *identities, size_t identities_length,
- uint16_t *chosen_wire_index);
-/* This function will be labeled S2N_API and become a publicly visible api once we release the psk API. */
-int s2n_config_set_psk_selection_callback(struct s2n_connection *conn, s2n_psk_selection_callback cb);
-
+S2N_RESULT s2n_connection_set_psk_type(struct s2n_connection *conn, s2n_psk_type type);
+S2N_RESULT s2n_psk_validate_keying_material(struct s2n_connection *conn);
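
The resumption fields added to struct s2n_psk (ticket_age_add, ticket_issue_time) and struct s2n_offered_psk (obfuscated_ticket_age) support the RFC 8446 Section 4.2.11 ticket age obfuscation: the client reports the ticket's age in milliseconds, masked by adding the server-chosen ticket_age_add modulo 2^32. A sketch of that computation; the nanosecond issue time and millisecond age units are assumptions, not taken from this diff:

/* Hedged sketch of ticket age obfuscation (RFC 8446, Section 4.2.11). */
#include <stdint.h>

static uint32_t obfuscated_ticket_age(uint64_t ticket_issue_time_ns,
                                      uint64_t current_time_ns,
                                      uint32_t ticket_age_add)
{
    uint64_t age_ms = (current_time_ns - ticket_issue_time_ns) / 1000000;
    /* The addition is defined modulo 2^32, which uint32_t arithmetic provides. */
    return (uint32_t) age_ms + ticket_age_add;
}
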
diff --git a/contrib/restricted/aws/s2n/tls/s2n_quic_support.c b/contrib/restricted/aws/s2n/tls/s2n_quic_support.c
index 226b87ee9a..bd0b44f906 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_quic_support.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_quic_support.c
@@ -37,19 +37,33 @@ S2N_RESULT s2n_read_in_bytes(struct s2n_connection *conn, struct s2n_stuffer *ou
int s2n_config_enable_quic(struct s2n_config *config)
{
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
config->quic_enabled = true;
return S2N_SUCCESS;
}
+int s2n_connection_enable_quic(struct s2n_connection *conn)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD_RESULT(s2n_connection_validate_tls13_support(conn));
+ conn->quic_enabled = true;
+ return S2N_SUCCESS;
+}
+
+bool s2n_connection_is_quic_enabled(struct s2n_connection *conn)
+{
+ return (conn && conn->quic_enabled) ||
+ (conn && conn->config && conn->config->quic_enabled);
+}
+
int s2n_connection_set_quic_transport_parameters(struct s2n_connection *conn,
const uint8_t *data_buffer, uint16_t data_len)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
- GUARD(s2n_free(&conn->our_quic_transport_parameters));
- GUARD(s2n_alloc(&conn->our_quic_transport_parameters, data_len));
- memcpy_check(conn->our_quic_transport_parameters.data, data_buffer, data_len);
+ POSIX_GUARD(s2n_free(&conn->our_quic_transport_parameters));
+ POSIX_GUARD(s2n_alloc(&conn->our_quic_transport_parameters, data_len));
+ POSIX_CHECKED_MEMCPY(conn->our_quic_transport_parameters.data, data_buffer, data_len);
return S2N_SUCCESS;
}
@@ -57,9 +71,9 @@ int s2n_connection_set_quic_transport_parameters(struct s2n_connection *conn,
int s2n_connection_get_quic_transport_parameters(struct s2n_connection *conn,
const uint8_t **data_buffer, uint16_t *data_len)
{
- notnull_check(conn);
- notnull_check(data_buffer);
- notnull_check(data_len);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(data_buffer);
+ POSIX_ENSURE_REF(data_len);
*data_buffer = conn->peer_quic_transport_parameters.data;
*data_len = conn->peer_quic_transport_parameters.size;
@@ -69,8 +83,8 @@ int s2n_connection_get_quic_transport_parameters(struct s2n_connection *conn,
int s2n_connection_set_secret_callback(struct s2n_connection *conn, s2n_secret_cb cb_func, void *ctx)
{
- notnull_check(conn);
- notnull_check(cb_func);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(cb_func);
conn->secret_cb = cb_func;
conn->secret_cb_context = ctx;
@@ -83,19 +97,19 @@ int s2n_connection_set_secret_callback(struct s2n_connection *conn, s2n_secret_c
*/
S2N_RESULT s2n_quic_read_handshake_message(struct s2n_connection *conn, uint8_t *message_type)
{
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn);
/* Allocate stuffer space now so that we don't have to realloc later in the handshake. */
- GUARD_AS_RESULT(s2n_stuffer_resize_if_empty(&conn->in, S2N_EXPECTED_QUIC_MESSAGE_SIZE));
+ RESULT_GUARD_POSIX(s2n_stuffer_resize_if_empty(&conn->in, S2N_EXPECTED_QUIC_MESSAGE_SIZE));
- GUARD_RESULT(s2n_read_in_bytes(conn, &conn->handshake.io, TLS_HANDSHAKE_HEADER_LENGTH));
+ RESULT_GUARD(s2n_read_in_bytes(conn, &conn->handshake.io, TLS_HANDSHAKE_HEADER_LENGTH));
uint32_t message_len;
- GUARD_AS_RESULT(s2n_handshake_parse_header(conn, message_type, &message_len));
- GUARD_AS_RESULT(s2n_stuffer_reread(&conn->handshake.io));
+ RESULT_GUARD_POSIX(s2n_handshake_parse_header(conn, message_type, &message_len));
+ RESULT_GUARD_POSIX(s2n_stuffer_reread(&conn->handshake.io));
- ENSURE(message_len < S2N_MAXIMUM_HANDSHAKE_MESSAGE_LENGTH, S2N_ERR_BAD_MESSAGE);
- GUARD_RESULT(s2n_read_in_bytes(conn, &conn->in, message_len));
+ RESULT_ENSURE(message_len < S2N_MAXIMUM_HANDSHAKE_MESSAGE_LENGTH, S2N_ERR_BAD_MESSAGE);
+ RESULT_GUARD(s2n_read_in_bytes(conn, &conn->in, message_len));
return S2N_RESULT_OK;
}
@@ -105,11 +119,11 @@ S2N_RESULT s2n_quic_read_handshake_message(struct s2n_connection *conn, uint8_t
*/
S2N_RESULT s2n_quic_write_handshake_message(struct s2n_connection *conn, struct s2n_blob *in)
{
- ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn);
/* Allocate stuffer space now so that we don't have to realloc later in the handshake. */
- GUARD_AS_RESULT(s2n_stuffer_resize_if_empty(&conn->out, S2N_EXPECTED_QUIC_MESSAGE_SIZE));
+ RESULT_GUARD_POSIX(s2n_stuffer_resize_if_empty(&conn->out, S2N_EXPECTED_QUIC_MESSAGE_SIZE));
- GUARD_AS_RESULT(s2n_stuffer_write(&conn->out, in));
+ RESULT_GUARD_POSIX(s2n_stuffer_write(&conn->out, in));
return S2N_RESULT_OK;
}
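
s2n_quic_read_handshake_message above relies on TLS handshake framing rather than record framing: each message starts with a 4-byte header (1-byte type, 24-bit big-endian length), and QUIC delivers these messages without the usual 5-byte record header. A minimal parsing sketch of that header:

/* Hedged sketch of the 4-byte TLS handshake message header QUIC mode reads. */
#include <stdint.h>
#include <stddef.h>

static int parse_handshake_header(const uint8_t *buf, size_t len,
                                  uint8_t *message_type, uint32_t *message_len)
{
    if (len < 4) {
        return -1;
    }
    *message_type = buf[0];
    *message_len = ((uint32_t) buf[1] << 16) | ((uint32_t) buf[2] << 8) | buf[3];
    return 0;
}
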
diff --git a/contrib/restricted/aws/s2n/tls/s2n_quic_support.h b/contrib/restricted/aws/s2n/tls/s2n_quic_support.h
index 459e03a2fd..c64583589a 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_quic_support.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_quic_support.h
@@ -25,11 +25,13 @@
* the behavior of S2N in potentially dangerous ways and should only be used by implementations
* of the QUIC protocol.
*
- * Additionally, the QUIC RFC is not yet finalized, so all QUIC APIs are considered experimental
- * and are subject to change without notice. They should only be used for testing purposes.
+ * Additionally, all QUIC APIs are considered experimental and are subject to change without
+ * notice. They should only be used for testing purposes.
*/
S2N_API int s2n_config_enable_quic(struct s2n_config *config);
+S2N_API int s2n_connection_enable_quic(struct s2n_connection *conn);
+S2N_API bool s2n_connection_is_quic_enabled(struct s2n_connection *conn);
/*
* Set the data to be sent in the quic_transport_parameters extension.
@@ -73,4 +75,17 @@ typedef int (*s2n_secret_cb) (void* context, struct s2n_connection *conn,
* The callback function will ONLY be triggered if QUIC is enabled. This API is not intended to be
* used outside of a QUIC implementation.
*/
-int s2n_connection_set_secret_callback(struct s2n_connection *conn, s2n_secret_cb cb_func, void *ctx);
+S2N_API int s2n_connection_set_secret_callback(struct s2n_connection *conn, s2n_secret_cb cb_func, void *ctx);
+
+/*
+ * Return the TLS alert that S2N-TLS would send, if S2N-TLS sent specific alerts.
+ *
+ * S2N-TLS only sends generic close_notify alerts for security reasons, and TLS never
+ * sends alerts when used by QUIC. This method returns the alert that would have been
+ * sent if S2N-TLS sent specific alerts as defined in the protocol specifications.
+ *
+ * WARNING: this method is still considered experimental and will not always report
+ * the correct alert description. It may be used for testing and logging, but
+ * not relied on for production logic.
+ */
+S2N_API int s2n_error_get_alert(int error, uint8_t *alert);
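
A hedged usage sketch for the QUIC entry points declared above; the include path and the placeholder transport-parameter bytes are assumptions, and a real QUIC stack would encode the parameters per the QUIC transport specification:

/* Hedged sketch: enabling QUIC mode and supplying transport parameters. */
#include "tls/s2n_quic_support.h" /* assumed visible on the include path */

static int setup_quic_connection(struct s2n_config *config, struct s2n_connection *conn)
{
    static const uint8_t transport_params[] = { 0x00 }; /* placeholder encoding */

    if (s2n_config_enable_quic(config) != S2N_SUCCESS) {
        return -1;
    }
    /* Per-connection enablement (added in this change) requires TLS1.3 support. */
    if (s2n_connection_enable_quic(conn) != S2N_SUCCESS) {
        return -1;
    }
    if (s2n_connection_set_quic_transport_parameters(conn, transport_params,
                                                     sizeof(transport_params)) != S2N_SUCCESS) {
        return -1;
    }
    return 0;
}
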
diff --git a/contrib/restricted/aws/s2n/tls/s2n_record.h b/contrib/restricted/aws/s2n/tls/s2n_record.h
index f5b6f27502..53aeba58bb 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_record.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_record.h
@@ -16,11 +16,56 @@
#pragma once
#include <stdint.h>
+#include "crypto/s2n_hmac.h"
+#include "stuffer/s2n_stuffer.h"
-#include "s2n_connection.h"
+#define S2N_TLS_CONTENT_TYPE_LENGTH 1
-#define TLS13_CONTENT_TYPE_LENGTH 1
+/* All versions of TLS define the record header the same:
+ * ContentType + ProtocolVersion + length
+ */
+#define S2N_TLS_RECORD_HEADER_LENGTH (S2N_TLS_CONTENT_TYPE_LENGTH + S2N_TLS_PROTOCOL_VERSION_LEN + 2)
+
+/*
+ * All versions of TLS limit the data fragment to 2^14 bytes.
+ *
+ *= https://tools.ietf.org/rfc/rfc5246#section-6.2.1
+ *# The record layer fragments information blocks into TLSPlaintext
+ *# records carrying data in chunks of 2^14 bytes or less.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-5.1
+ *# The record layer fragments information blocks into TLSPlaintext
+ *# records carrying data in chunks of 2^14 bytes or less.
+ */
+#define S2N_TLS_MAXIMUM_FRAGMENT_LENGTH (1 << 14)
+
+/* The TLS1.2 record length allows for 1024 bytes of compression expansion and
+ * 1024 bytes of encryption expansion and padding.
+ * Since S2N does not support compression, we can ignore the compression overhead.
+ */
+#define S2N_TLS12_ENCRYPTION_OVERHEAD_SIZE 1024
+#define S2N_TLS12_MAX_RECORD_LEN_FOR(frag) ((frag) + S2N_TLS12_ENCRYPTION_OVERHEAD_SIZE \
+ + S2N_TLS_RECORD_HEADER_LENGTH)
+#define S2N_TLS12_MAXIMUM_RECORD_LENGTH S2N_TLS12_MAX_RECORD_LEN_FOR(S2N_TLS_MAXIMUM_FRAGMENT_LENGTH)
+
+/*
+ *= https://tools.ietf.org/rfc/rfc8446#section-5.2
+ *# An AEAD algorithm used in TLS 1.3 MUST NOT produce an expansion
+ *# greater than 255 octets.
+ */
+#define S2N_TLS13_ENCRYPTION_OVERHEAD_SIZE 255
+#define S2N_TLS13_MAX_RECORD_LEN_FOR(frag) ((frag) + S2N_TLS_CONTENT_TYPE_LENGTH \
+ + S2N_TLS13_ENCRYPTION_OVERHEAD_SIZE \
+ + S2N_TLS_RECORD_HEADER_LENGTH)
+#define S2N_TLS13_MAXIMUM_RECORD_LENGTH S2N_TLS13_MAX_RECORD_LEN_FOR(S2N_TLS_MAXIMUM_FRAGMENT_LENGTH)
+
+/* Currently, TLS1.2 records may be larger than TLS1.3 records.
+ * If the protocol is unknown, assume TLS1.2.
+ */
+#define S2N_TLS_MAX_RECORD_LEN_FOR(frag) S2N_TLS12_MAX_RECORD_LEN_FOR(frag)
+#define S2N_TLS_MAXIMUM_RECORD_LENGTH S2N_TLS_MAX_RECORD_LEN_FOR(S2N_TLS_MAXIMUM_FRAGMENT_LENGTH)
+S2N_RESULT s2n_record_max_write_size(struct s2n_connection *conn, uint16_t max_fragment_size, uint16_t *max_record_size);
extern S2N_RESULT s2n_record_max_write_payload_size(struct s2n_connection *conn, uint16_t *max_fragment_size);
extern S2N_RESULT s2n_record_min_write_payload_size(struct s2n_connection *conn, uint16_t *payload_size);
extern int s2n_record_write(struct s2n_connection *conn, uint8_t content_type, struct s2n_blob *in);
@@ -30,5 +75,5 @@ extern int s2n_record_header_parse(struct s2n_connection *conn, uint8_t * conten
extern int s2n_tls13_parse_record_type(struct s2n_stuffer *stuffer, uint8_t * record_type);
extern int s2n_sslv2_record_header_parse(struct s2n_connection *conn, uint8_t * record_type, uint8_t * client_protocol_version, uint16_t * fragment_length);
extern int s2n_verify_cbc(struct s2n_connection *conn, struct s2n_hmac_state *hmac, struct s2n_blob *decrypted);
-extern S2N_RESULT s2n_aead_aad_init(const struct s2n_connection *conn, uint8_t * sequence_number, uint8_t content_type, uint16_t record_length, struct s2n_stuffer *ad);
-extern S2N_RESULT s2n_tls13_aead_aad_init(uint16_t record_length, uint8_t tag_length, struct s2n_stuffer *ad);
+extern S2N_RESULT s2n_aead_aad_init(const struct s2n_connection *conn, uint8_t * sequence_number, uint8_t content_type, uint16_t record_length, struct s2n_blob *ad);
+extern S2N_RESULT s2n_tls13_aead_aad_init(uint16_t record_length, uint8_t tag_length, struct s2n_blob *ad);
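
The new macros make the size bookkeeping explicit: the record header is 5 bytes (content type, 2-byte protocol version, 2-byte length, assuming S2N_TLS_PROTOCOL_VERSION_LEN is 2), TLS1.2 allows up to 1024 bytes of expansion on a 2^14-byte fragment, and TLS1.3 allows the inner content type byte plus at most 255 bytes of AEAD expansion. A small arithmetic check of the resulting maxima:

/* Hedged arithmetic check of the record-size macros above. */
#include <assert.h>

static void check_record_size_arithmetic(void)
{
    /* S2N_TLS_RECORD_HEADER_LENGTH: 1 (content type) + 2 (protocol version) + 2 (length) */
    assert(1 + 2 + 2 == 5);
    /* S2N_TLS12_MAXIMUM_RECORD_LENGTH: 2^14 fragment + 1024 expansion + 5 header */
    assert(16384 + 1024 + 5 == 17413);
    /* S2N_TLS13_MAXIMUM_RECORD_LENGTH: 2^14 + 1 inner type + 255 AEAD expansion + 5 header */
    assert(16384 + 1 + 255 + 5 == 16645);
}
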
diff --git a/contrib/restricted/aws/s2n/tls/s2n_record_read.c b/contrib/restricted/aws/s2n/tls/s2n_record_read.c
index 7b8536818e..8a1bf8836f 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_record_read.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_record_read.c
@@ -41,15 +41,16 @@ int s2n_sslv2_record_header_parse(
S2N_ERROR_IF(s2n_stuffer_data_available(in) < S2N_TLS_RECORD_HEADER_LENGTH, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_stuffer_read_uint16(in, fragment_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, fragment_length));
/* Adjust to account for the 3 bytes of payload data we consumed in the header */
+ POSIX_ENSURE_GTE(*fragment_length, 3);
*fragment_length -= 3;
- GUARD(s2n_stuffer_read_uint8(in, record_type));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, record_type));
uint8_t protocol_version[S2N_TLS_PROTOCOL_VERSION_LEN];
- GUARD(s2n_stuffer_read_bytes(in, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
*client_protocol_version = (protocol_version[0] * 10) + protocol_version[1];
@@ -65,10 +66,10 @@ int s2n_record_header_parse(
S2N_ERROR_IF(s2n_stuffer_data_available(in) < S2N_TLS_RECORD_HEADER_LENGTH, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_stuffer_read_uint8(in, content_type));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, content_type));
uint8_t protocol_version[S2N_TLS_PROTOCOL_VERSION_LEN];
- GUARD(s2n_stuffer_read_bytes(in, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
const uint8_t version = (protocol_version[0] * 10) + protocol_version[1];
/* https://tools.ietf.org/html/rfc5246#appendix-E.1 states that servers must accept any value {03,XX} as the record
@@ -82,13 +83,13 @@ int s2n_record_header_parse(
S2N_ERROR_IF(conn->actual_protocol_version_established &&
MIN(conn->actual_protocol_version, S2N_TLS12) /* check against legacy record version (1.2) in tls 1.3 */
!= version, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_stuffer_read_uint16(in, fragment_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, fragment_length));
/* Some servers send fragments that are above the maximum length. (e.g.
* Openssl 1.0.1, so we don't check if the fragment length is >
* S2N_TLS_MAXIMUM_FRAGMENT_LENGTH. The on-the-wire max is 65k
*/
- GUARD(s2n_stuffer_reread(in));
+ POSIX_GUARD(s2n_stuffer_reread(in));
return 0;
}
@@ -115,7 +116,7 @@ int s2n_record_parse(struct s2n_connection *conn)
{
uint8_t content_type;
uint16_t encrypted_length;
- GUARD(s2n_record_header_parse(conn, &content_type, &encrypted_length));
+ POSIX_GUARD(s2n_record_header_parse(conn, &content_type, &encrypted_length));
struct s2n_crypto_parameters *current_client_crypto = conn->client;
struct s2n_crypto_parameters *current_server_crypto = conn->server;
@@ -143,21 +144,27 @@ int s2n_record_parse(struct s2n_connection *conn)
conn->server = current_server_crypto;
}
+ /* The NULL stream cipher MUST NEVER be used for ApplicationData.
+ * If ApplicationData is unencrypted, we can't trust it. */
+ if (cipher_suite->record_alg->cipher == &s2n_null_cipher) {
+ POSIX_ENSURE(content_type != TLS_APPLICATION_DATA, S2N_ERR_DECRYPT);
+ }
+
switch (cipher_suite->record_alg->cipher->type) {
case S2N_AEAD:
- GUARD(s2n_record_parse_aead(cipher_suite, conn, content_type, encrypted_length, implicit_iv, mac, sequence_number, session_key));
+ POSIX_GUARD(s2n_record_parse_aead(cipher_suite, conn, content_type, encrypted_length, implicit_iv, mac, sequence_number, session_key));
break;
case S2N_CBC:
- GUARD(s2n_record_parse_cbc(cipher_suite, conn, content_type, encrypted_length, implicit_iv, mac, sequence_number, session_key));
+ POSIX_GUARD(s2n_record_parse_cbc(cipher_suite, conn, content_type, encrypted_length, implicit_iv, mac, sequence_number, session_key));
break;
case S2N_COMPOSITE:
- GUARD(s2n_record_parse_composite(cipher_suite, conn, content_type, encrypted_length, implicit_iv, mac, sequence_number, session_key));
+ POSIX_GUARD(s2n_record_parse_composite(cipher_suite, conn, content_type, encrypted_length, implicit_iv, mac, sequence_number, session_key));
break;
case S2N_STREAM:
- GUARD(s2n_record_parse_stream(cipher_suite, conn, content_type, encrypted_length, implicit_iv, mac, sequence_number, session_key));
+ POSIX_GUARD(s2n_record_parse_stream(cipher_suite, conn, content_type, encrypted_length, implicit_iv, mac, sequence_number, session_key));
break;
default:
- S2N_ERROR(S2N_ERR_CIPHER_TYPE);
+ POSIX_BAIL(S2N_ERR_CIPHER_TYPE);
break;
}
@@ -181,7 +188,7 @@ int s2n_tls13_parse_record_type(struct s2n_stuffer *stuffer, uint8_t *record_typ
S2N_ERROR_IF(bytes_left > S2N_MAXIMUM_INNER_PLAINTEXT_LENGTH + 16, S2N_ERR_MAX_INNER_PLAINTEXT_SIZE);
/* set cursor to the end of the stuffer */
- GUARD(s2n_stuffer_skip_read(stuffer, bytes_left));
+ POSIX_GUARD(s2n_stuffer_skip_read(stuffer, bytes_left));
/* Record type should have values greater than zero.
* If zero, treat as padding, keep reading and wiping from the back
@@ -190,18 +197,18 @@ int s2n_tls13_parse_record_type(struct s2n_stuffer *stuffer, uint8_t *record_typ
*record_type = 0;
while (*record_type == 0) {
/* back the cursor by one to read off the last byte */
- GUARD(s2n_stuffer_rewind_read(stuffer, 1));
+ POSIX_GUARD(s2n_stuffer_rewind_read(stuffer, 1));
/* set the record type */
- GUARD(s2n_stuffer_read_uint8(stuffer, record_type));
+ POSIX_GUARD(s2n_stuffer_read_uint8(stuffer, record_type));
/* wipe the last byte at the end of the stuffer */
- GUARD(s2n_stuffer_wipe_n(stuffer, 1));
+ POSIX_GUARD(s2n_stuffer_wipe_n(stuffer, 1));
}
/* only the original plaintext should remain */
/* now reset the read cursor at where it should be */
- GUARD(s2n_stuffer_reread(stuffer));
+ POSIX_GUARD(s2n_stuffer_reread(stuffer));
/* Even in the incorrect case above with up to 16 extra bytes, we should never see too much data after unpadding */
S2N_ERROR_IF(s2n_stuffer_data_available(stuffer) > S2N_MAXIMUM_INNER_PLAINTEXT_LENGTH - 1, S2N_ERR_MAX_INNER_PLAINTEXT_SIZE);
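
s2n_tls13_parse_record_type above implements the RFC 8446 Section 5.4 inner-plaintext rule: trailing zero bytes are padding, and the last nonzero byte is the real content type. A stand-alone sketch of the same rule:

/* Hedged sketch of TLS1.3 inner-plaintext de-padding. */
#include <stdint.h>
#include <stddef.h>

static int split_inner_plaintext(const uint8_t *plaintext, size_t len,
                                 uint8_t *content_type, size_t *payload_len)
{
    while (len > 0 && plaintext[len - 1] == 0) {
        len--; /* strip padding */
    }
    if (len == 0) {
        return -1; /* no content type found: malformed record */
    }
    *content_type = plaintext[len - 1];
    *payload_len = len - 1;
    return 0;
}
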
diff --git a/contrib/restricted/aws/s2n/tls/s2n_record_read_aead.c b/contrib/restricted/aws/s2n/tls/s2n_record_read_aead.c
index 87207d3723..ba1d460d82 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_record_read_aead.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_record_read_aead.c
@@ -46,19 +46,19 @@ int s2n_record_parse_aead(
s2n_stack_blob(aad, is_tls13_record ? S2N_TLS13_AAD_LEN : S2N_TLS_MAX_AAD_LEN, S2N_TLS_MAX_AAD_LEN);
struct s2n_blob en = {.size = encrypted_length,.data = s2n_stuffer_raw_read(&conn->in, encrypted_length) };
- notnull_check(en.data);
+ POSIX_ENSURE_REF(en.data);
/* In AEAD mode, the explicit IV is in the record */
- gte_check(en.size, cipher_suite->record_alg->cipher->io.aead.record_iv_size);
+ POSIX_ENSURE_GTE(en.size, cipher_suite->record_alg->cipher->io.aead.record_iv_size);
uint8_t aad_iv[S2N_TLS_MAX_IV_LEN] = { 0 };
struct s2n_blob iv = {.data = aad_iv,.size = sizeof(aad_iv) };
struct s2n_stuffer iv_stuffer = {0};
- GUARD(s2n_stuffer_init(&iv_stuffer, &iv));
+ POSIX_GUARD(s2n_stuffer_init(&iv_stuffer, &iv));
if (cipher_suite->record_alg->flags & S2N_TLS12_AES_GCM_AEAD_NONCE) {
/* Partially explicit nonce. See RFC 5288 Section 3 */
- GUARD(s2n_stuffer_write_bytes(&iv_stuffer, implicit_iv, cipher_suite->record_alg->cipher->io.aead.fixed_iv_size));
- GUARD(s2n_stuffer_write_bytes(&iv_stuffer, en.data, cipher_suite->record_alg->cipher->io.aead.record_iv_size));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&iv_stuffer, implicit_iv, cipher_suite->record_alg->cipher->io.aead.fixed_iv_size));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&iv_stuffer, en.data, cipher_suite->record_alg->cipher->io.aead.record_iv_size));
} else if (cipher_suite->record_alg->flags & S2N_TLS12_CHACHA_POLY_AEAD_NONCE || is_tls13_record) {
/* Fully implicit nonce.
* This is introduced with ChaChaPoly with RFC 7905 Section 2
@@ -68,14 +68,14 @@ int s2n_record_parse_aead(
* to align and xor-ed with the 96-bit IV.
**/
uint8_t four_zeroes[4] = { 0 };
- GUARD(s2n_stuffer_write_bytes(&iv_stuffer, four_zeroes, 4));
- GUARD(s2n_stuffer_write_bytes(&iv_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&iv_stuffer, four_zeroes, 4));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&iv_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
for (int i = 0; i < cipher_suite->record_alg->cipher->io.aead.fixed_iv_size; i++) {
S2N_INVARIANT(i <= cipher_suite->record_alg->cipher->io.aead.fixed_iv_size);
aad_iv[i] = aad_iv[i] ^ implicit_iv[i];
}
} else {
- S2N_ERROR(S2N_ERR_INVALID_NONCE_TYPE);
+ POSIX_BAIL(S2N_ERR_INVALID_NONCE_TYPE);
}
/* Set the IV size to the amount of data written */
@@ -83,17 +83,14 @@ int s2n_record_parse_aead(
uint16_t payload_length = encrypted_length;
/* remove the AEAD overhead from the record size */
- gte_check(payload_length, cipher_suite->record_alg->cipher->io.aead.record_iv_size + cipher_suite->record_alg->cipher->io.aead.tag_size);
+ POSIX_ENSURE_GTE(payload_length, cipher_suite->record_alg->cipher->io.aead.record_iv_size + cipher_suite->record_alg->cipher->io.aead.tag_size);
payload_length -= cipher_suite->record_alg->cipher->io.aead.record_iv_size;
payload_length -= cipher_suite->record_alg->cipher->io.aead.tag_size;
- struct s2n_stuffer ad_stuffer = {0};
- GUARD(s2n_stuffer_init(&ad_stuffer, &aad));
-
if (is_tls13_record) {
- GUARD_AS_POSIX(s2n_tls13_aead_aad_init(payload_length, cipher_suite->record_alg->cipher->io.aead.tag_size, &ad_stuffer));
+ POSIX_GUARD_RESULT(s2n_tls13_aead_aad_init(payload_length, cipher_suite->record_alg->cipher->io.aead.tag_size, &aad));
} else {
- GUARD_AS_POSIX(s2n_aead_aad_init(conn, sequence_number, content_type, payload_length, &ad_stuffer));
+ POSIX_GUARD_RESULT(s2n_aead_aad_init(conn, sequence_number, content_type, payload_length, &aad));
}
/* Decrypt stuff! */
@@ -102,25 +99,25 @@ int s2n_record_parse_aead(
en.data += cipher_suite->record_alg->cipher->io.aead.record_iv_size;
/* Check that we have some data to decrypt */
- ne_check(en.size, 0);
+ POSIX_ENSURE_NE(en.size, 0);
- GUARD(cipher_suite->record_alg->cipher->io.aead.decrypt(session_key, &iv, &aad, &en, &en));
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.aead.decrypt(session_key, &iv, &aad, &en, &en));
struct s2n_blob seq = {.data = sequence_number,.size = S2N_TLS_SEQUENCE_NUM_LEN };
- GUARD(s2n_increment_sequence_number(&seq));
+ POSIX_GUARD(s2n_increment_sequence_number(&seq));
/* O.k., we've successfully read and decrypted the record, now we need to align the stuffer
* for reading the plaintext data.
*/
- GUARD(s2n_stuffer_reread(&conn->in));
- GUARD(s2n_stuffer_reread(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->in));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->header_in));
/* Skip the IV, if any */
if (conn->actual_protocol_version >= S2N_TLS12) {
- GUARD(s2n_stuffer_skip_read(&conn->in, cipher_suite->record_alg->cipher->io.aead.record_iv_size));
+ POSIX_GUARD(s2n_stuffer_skip_read(&conn->in, cipher_suite->record_alg->cipher->io.aead.record_iv_size));
}
/* Truncate and wipe the MAC and any padding */
- GUARD(s2n_stuffer_wipe_n(&conn->in, s2n_stuffer_data_available(&conn->in) - payload_length));
+ POSIX_GUARD(s2n_stuffer_wipe_n(&conn->in, s2n_stuffer_data_available(&conn->in) - payload_length));
conn->in_status = PLAINTEXT;
return 0;
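
The AEAD parsing above handles two nonce layouts: the partially explicit TLS1.2 AES-GCM nonce (fixed IV from the key block plus an explicit part carried in the record, RFC 5288) and the fully implicit nonce used by ChaCha20-Poly1305 and all TLS1.3 ciphers, where the 8-byte sequence number is left-padded to 12 bytes and XOR-ed with the implicit IV. A sketch of the implicit construction:

/* Hedged sketch of the fully implicit AEAD nonce (RFC 7905 / RFC 8446). */
#include <stdint.h>
#include <string.h>

#define EXAMPLE_IV_LEN  12
#define EXAMPLE_SEQ_LEN 8

static void build_implicit_nonce(const uint8_t implicit_iv[EXAMPLE_IV_LEN],
                                 const uint8_t sequence_number[EXAMPLE_SEQ_LEN],
                                 uint8_t nonce[EXAMPLE_IV_LEN])
{
    memset(nonce, 0, EXAMPLE_IV_LEN);
    /* Four zero bytes, then the 64-bit sequence number */
    memcpy(nonce + 4, sequence_number, EXAMPLE_SEQ_LEN);
    for (int i = 0; i < EXAMPLE_IV_LEN; i++) {
        nonce[i] ^= implicit_iv[i];
    }
}
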
diff --git a/contrib/restricted/aws/s2n/tls/s2n_record_read_cbc.c b/contrib/restricted/aws/s2n/tls/s2n_record_read_cbc.c
index 9948469593..f72f090915 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_record_read_cbc.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_record_read_cbc.c
@@ -45,85 +45,85 @@ int s2n_record_parse_cbc(
/* Add the header to the HMAC */
uint8_t *header = s2n_stuffer_raw_read(&conn->header_in, S2N_TLS_RECORD_HEADER_LENGTH);
- notnull_check(header);
+ POSIX_ENSURE_REF(header);
- lte_check(cipher_suite->record_alg->cipher->io.cbc.record_iv_size, S2N_TLS_MAX_IV_LEN);
+ POSIX_ENSURE_LTE(cipher_suite->record_alg->cipher->io.cbc.record_iv_size, S2N_TLS_MAX_IV_LEN);
/* For TLS >= 1.1 the IV is in the packet */
if (conn->actual_protocol_version > S2N_TLS10) {
- GUARD(s2n_stuffer_read(&conn->in, &iv));
- gte_check(encrypted_length, iv.size);
+ POSIX_GUARD(s2n_stuffer_read(&conn->in, &iv));
+ POSIX_ENSURE_GTE(encrypted_length, iv.size);
encrypted_length -= iv.size;
}
struct s2n_blob en = {.size = encrypted_length,.data = s2n_stuffer_raw_read(&conn->in, encrypted_length) };
- notnull_check(en.data);
+ POSIX_ENSURE_REF(en.data);
uint16_t payload_length = encrypted_length;
uint8_t mac_digest_size;
- GUARD(s2n_hmac_digest_size(mac->alg, &mac_digest_size));
+ POSIX_GUARD(s2n_hmac_digest_size(mac->alg, &mac_digest_size));
- gte_check(payload_length, mac_digest_size);
+ POSIX_ENSURE_GTE(payload_length, mac_digest_size);
payload_length -= mac_digest_size;
/* Decrypt stuff! */
/* Check that we have some data to decrypt */
- ne_check(en.size, 0);
+ POSIX_ENSURE_NE(en.size, 0);
/* ... and that we have a multiple of the block size */
- eq_check(en.size % iv.size, 0);
+ POSIX_ENSURE_EQ(en.size % iv.size, 0);
/* Copy the last encrypted block to be the next IV */
if (conn->actual_protocol_version < S2N_TLS11) {
- memcpy_check(ivpad, en.data + en.size - iv.size, iv.size);
+ POSIX_CHECKED_MEMCPY(ivpad, en.data + en.size - iv.size, iv.size);
}
- GUARD(cipher_suite->record_alg->cipher->io.cbc.decrypt(session_key, &iv, &en, &en));
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.cbc.decrypt(session_key, &iv, &en, &en));
if (conn->actual_protocol_version < S2N_TLS11) {
- memcpy_check(implicit_iv, ivpad, iv.size);
+ POSIX_CHECKED_MEMCPY(implicit_iv, ivpad, iv.size);
}
/* Subtract the padding length */
- gt_check(en.size, 0);
+ POSIX_ENSURE_GT(en.size, 0);
uint32_t out = 0;
- GUARD(s2n_sub_overflow(payload_length, en.data[en.size - 1] + 1, &out));
+ POSIX_GUARD(s2n_sub_overflow(payload_length, en.data[en.size - 1] + 1, &out));
payload_length = out;
/* Update the MAC */
header[3] = (payload_length >> 8);
header[4] = payload_length & 0xff;
- GUARD(s2n_hmac_reset(mac));
- GUARD(s2n_hmac_update(mac, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_hmac_reset(mac));
+ POSIX_GUARD(s2n_hmac_update(mac, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
if (conn->actual_protocol_version == S2N_SSLv3) {
- GUARD(s2n_hmac_update(mac, header, 1));
- GUARD(s2n_hmac_update(mac, header + 3, 2));
+ POSIX_GUARD(s2n_hmac_update(mac, header, 1));
+ POSIX_GUARD(s2n_hmac_update(mac, header + 3, 2));
} else {
- GUARD(s2n_hmac_update(mac, header, S2N_TLS_RECORD_HEADER_LENGTH));
+ POSIX_GUARD(s2n_hmac_update(mac, header, S2N_TLS_RECORD_HEADER_LENGTH));
}
struct s2n_blob seq = {.data = sequence_number,.size = S2N_TLS_SEQUENCE_NUM_LEN };
- GUARD(s2n_increment_sequence_number(&seq));
+ POSIX_GUARD(s2n_increment_sequence_number(&seq));
- /* Padding */
+ /* Padding. This finalizes the provided HMAC. */
if (s2n_verify_cbc(conn, mac, &en) < 0) {
- GUARD(s2n_stuffer_wipe(&conn->in));
- S2N_ERROR(S2N_ERR_BAD_MESSAGE);
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->in));
+ POSIX_BAIL(S2N_ERR_BAD_MESSAGE);
}
/* O.k., we've successfully read and decrypted the record, now we need to align the stuffer
* for reading the plaintext data.
*/
- GUARD(s2n_stuffer_reread(&conn->in));
- GUARD(s2n_stuffer_reread(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->in));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->header_in));
/* Skip the IV, if any */
if (conn->actual_protocol_version > S2N_TLS10) {
- GUARD(s2n_stuffer_skip_read(&conn->in, cipher_suite->record_alg->cipher->io.cbc.record_iv_size));
+ POSIX_GUARD(s2n_stuffer_skip_read(&conn->in, cipher_suite->record_alg->cipher->io.cbc.record_iv_size));
}
/* Truncate and wipe the MAC and any padding */
- GUARD(s2n_stuffer_wipe_n(&conn->in, s2n_stuffer_data_available(&conn->in) - payload_length));
+ POSIX_GUARD(s2n_stuffer_wipe_n(&conn->in, s2n_stuffer_data_available(&conn->in) - payload_length));
conn->in_status = PLAINTEXT;
return 0;
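
The CBC path above strips the explicit IV (TLS1.1 and later), the MAC, and the padding, where the last plaintext byte gives the pad length and one more byte covers the length byte itself; the s2n_sub_overflow guards reject records whose claimed padding would underflow the length, and the actual padding and MAC check is then done by s2n_verify_cbc. A length-bookkeeping sketch:

/* Hedged sketch of the CBC payload-length arithmetic above. */
#include <stdint.h>
#include <stddef.h>

static int cbc_payload_length(size_t encrypted_length, size_t iv_size,
                              size_t mac_size, const uint8_t *decrypted,
                              size_t *payload_length)
{
    if (encrypted_length <= iv_size) {
        return -1;
    }
    size_t data_len = encrypted_length - iv_size;       /* bytes covered by the cipher */
    size_t pad = (size_t) decrypted[data_len - 1] + 1;  /* padding bytes + the length byte */
    if (mac_size + pad > data_len) {
        return -1;                                      /* mirrors the s2n_sub_overflow() guards */
    }
    *payload_length = data_len - mac_size - pad;
    return 0;
}
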
diff --git a/contrib/restricted/aws/s2n/tls/s2n_record_read_composite.c b/contrib/restricted/aws/s2n/tls/s2n_record_read_composite.c
index 3d39bdd8c7..62b082f576 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_record_read_composite.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_record_read_composite.c
@@ -45,16 +45,16 @@ int s2n_record_parse_composite(
/* Add the header to the HMAC */
uint8_t *header = s2n_stuffer_raw_read(&conn->header_in, S2N_TLS_RECORD_HEADER_LENGTH);
- notnull_check(header);
+ POSIX_ENSURE_REF(header);
struct s2n_blob en = {.size = encrypted_length,.data = s2n_stuffer_raw_read(&conn->in, encrypted_length) };
- notnull_check(en.data);
+ POSIX_ENSURE_REF(en.data);
uint16_t payload_length = encrypted_length;
uint8_t mac_digest_size;
- GUARD(s2n_hmac_digest_size(mac->alg, &mac_digest_size));
+ POSIX_GUARD(s2n_hmac_digest_size(mac->alg, &mac_digest_size));
- gte_check(payload_length, mac_digest_size);
+ POSIX_ENSURE_GTE(payload_length, mac_digest_size);
payload_length -= mac_digest_size;
/* Compute non-payload parts of the MAC(seq num, type, proto vers, fragment length) for composite ciphers.
@@ -63,51 +63,51 @@ int s2n_record_parse_composite(
/* In the decrypt case, this outputs the MAC digest length:
* https://github.com/openssl/openssl/blob/master/crypto/evp/e_aes_cbc_hmac_sha1.c#L842 */
int mac_size = 0;
- GUARD(cipher_suite->record_alg->cipher->io.comp.initial_hmac(session_key, sequence_number, content_type, conn->actual_protocol_version, payload_length, &mac_size));
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.comp.initial_hmac(session_key, sequence_number, content_type, conn->actual_protocol_version, payload_length, &mac_size));
- gte_check(payload_length, mac_size);
+ POSIX_ENSURE_GTE(payload_length, mac_size);
payload_length -= mac_size;
/* Adjust payload_length for explicit IV */
if (conn->actual_protocol_version > S2N_TLS10) {
uint32_t out = 0;
- GUARD(s2n_sub_overflow(payload_length, cipher_suite->record_alg->cipher->io.comp.record_iv_size, &out));
+ POSIX_GUARD(s2n_sub_overflow(payload_length, cipher_suite->record_alg->cipher->io.comp.record_iv_size, &out));
payload_length = out;
}
/* Decrypt stuff! */
- ne_check(en.size, 0);
- eq_check(en.size % iv.size, 0);
+ POSIX_ENSURE_NE(en.size, 0);
+ POSIX_ENSURE_EQ(en.size % iv.size, 0);
/* Copy the last encrypted block to be the next IV */
- memcpy_check(ivpad, en.data + en.size - iv.size, iv.size);
+ POSIX_CHECKED_MEMCPY(ivpad, en.data + en.size - iv.size, iv.size);
/* This will: Skip the explicit IV(if applicable), decrypt the payload, verify the MAC and padding. */
- GUARD((cipher_suite->record_alg->cipher->io.comp.decrypt(session_key, &iv, &en, &en)));
+ POSIX_GUARD((cipher_suite->record_alg->cipher->io.comp.decrypt(session_key, &iv, &en, &en)));
- memcpy_check(implicit_iv, ivpad, iv.size);
+ POSIX_CHECKED_MEMCPY(implicit_iv, ivpad, iv.size);
/* Subtract the padding length */
- gt_check(en.size, 0);
+ POSIX_ENSURE_GT(en.size, 0);
uint32_t out = 0;
- GUARD(s2n_sub_overflow(payload_length, en.data[en.size - 1] + 1, &out));
+ POSIX_GUARD(s2n_sub_overflow(payload_length, en.data[en.size - 1] + 1, &out));
payload_length = out;
struct s2n_blob seq = {.data = sequence_number,.size = S2N_TLS_SEQUENCE_NUM_LEN };
- GUARD(s2n_increment_sequence_number(&seq));
+ POSIX_GUARD(s2n_increment_sequence_number(&seq));
/* O.k., we've successfully read and decrypted the record, now we need to align the stuffer
* for reading the plaintext data.
*/
- GUARD(s2n_stuffer_reread(&conn->in));
- GUARD(s2n_stuffer_reread(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->in));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->header_in));
/* Skip the IV, if any */
if (conn->actual_protocol_version > S2N_TLS10) {
- GUARD(s2n_stuffer_skip_read(&conn->in, cipher_suite->record_alg->cipher->io.comp.record_iv_size));
+ POSIX_GUARD(s2n_stuffer_skip_read(&conn->in, cipher_suite->record_alg->cipher->io.comp.record_iv_size));
}
/* Truncate and wipe the MAC and any padding */
- GUARD(s2n_stuffer_wipe_n(&conn->in, s2n_stuffer_data_available(&conn->in) - payload_length));
+ POSIX_GUARD(s2n_stuffer_wipe_n(&conn->in, s2n_stuffer_data_available(&conn->in) - payload_length));
conn->in_status = PLAINTEXT;
return 0;
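
For composite (stitched AES-CBC-HMAC) ciphers, the record MAC is computed inside the cipher, so the code above only feeds it the non-payload MAC input: the 13-byte pseudo-header of sequence number, content type, protocol version, and fragment length defined for TLS1.0-1.2. A sketch of that layout:

/* Hedged sketch of the 13-byte TLS MAC pseudo-header (RFC 5246, Section 6.2.3.1). */
#include <stdint.h>
#include <string.h>

static void build_mac_pseudo_header(uint8_t out[13], const uint8_t sequence_number[8],
                                    uint8_t content_type, uint8_t version_major,
                                    uint8_t version_minor, uint16_t fragment_length)
{
    memcpy(out, sequence_number, 8);
    out[8] = content_type;
    out[9] = version_major;
    out[10] = version_minor;
    out[11] = (uint8_t) (fragment_length >> 8);
    out[12] = (uint8_t) (fragment_length & 0xff);
}
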
diff --git a/contrib/restricted/aws/s2n/tls/s2n_record_read_stream.c b/contrib/restricted/aws/s2n/tls/s2n_record_read_stream.c
index dc6d6cb93e..8145d74d6a 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_record_read_stream.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_record_read_stream.c
@@ -41,57 +41,57 @@ int s2n_record_parse_stream(
{
/* Add the header to the HMAC */
uint8_t *header = s2n_stuffer_raw_read(&conn->header_in, S2N_TLS_RECORD_HEADER_LENGTH);
- notnull_check(header);
+ POSIX_ENSURE_REF(header);
struct s2n_blob en = {.size = encrypted_length,.data = s2n_stuffer_raw_read(&conn->in, encrypted_length) };
- notnull_check(en.data);
+ POSIX_ENSURE_REF(en.data);
uint16_t payload_length = encrypted_length;
uint8_t mac_digest_size;
- GUARD(s2n_hmac_digest_size(mac->alg, &mac_digest_size));
+ POSIX_GUARD(s2n_hmac_digest_size(mac->alg, &mac_digest_size));
- gte_check(payload_length, mac_digest_size);
+ POSIX_ENSURE_GTE(payload_length, mac_digest_size);
payload_length -= mac_digest_size;
/* Decrypt stuff! */
- GUARD(cipher_suite->record_alg->cipher->io.stream.decrypt(session_key, &en, &en));
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.stream.decrypt(session_key, &en, &en));
/* Update the MAC */
header[3] = (payload_length >> 8);
header[4] = payload_length & 0xff;
- GUARD(s2n_hmac_reset(mac));
- GUARD(s2n_hmac_update(mac, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_hmac_reset(mac));
+ POSIX_GUARD(s2n_hmac_update(mac, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
if (conn->actual_protocol_version == S2N_SSLv3) {
- GUARD(s2n_hmac_update(mac, header, 1));
- GUARD(s2n_hmac_update(mac, header + 3, 2));
+ POSIX_GUARD(s2n_hmac_update(mac, header, 1));
+ POSIX_GUARD(s2n_hmac_update(mac, header + 3, 2));
} else {
- GUARD(s2n_hmac_update(mac, header, S2N_TLS_RECORD_HEADER_LENGTH));
+ POSIX_GUARD(s2n_hmac_update(mac, header, S2N_TLS_RECORD_HEADER_LENGTH));
}
struct s2n_blob seq = {.data = sequence_number,.size = S2N_TLS_SEQUENCE_NUM_LEN };
- GUARD(s2n_increment_sequence_number(&seq));
+ POSIX_GUARD(s2n_increment_sequence_number(&seq));
/* MAC check for streaming ciphers - no padding */
- GUARD(s2n_hmac_update(mac, en.data, payload_length));
+ POSIX_GUARD(s2n_hmac_update(mac, en.data, payload_length));
uint8_t check_digest[S2N_MAX_DIGEST_LEN];
- lte_check(mac_digest_size, sizeof(check_digest));
- GUARD(s2n_hmac_digest(mac, check_digest, mac_digest_size));
+ POSIX_ENSURE_LTE(mac_digest_size, sizeof(check_digest));
+ POSIX_GUARD(s2n_hmac_digest(mac, check_digest, mac_digest_size));
if (s2n_hmac_digest_verify(en.data + payload_length, check_digest, mac_digest_size) < 0) {
- GUARD(s2n_stuffer_wipe(&conn->in));
- S2N_ERROR(S2N_ERR_BAD_MESSAGE);
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->in));
+ POSIX_BAIL(S2N_ERR_BAD_MESSAGE);
}
/* O.k., we've successfully read and decrypted the record, now we need to align the stuffer
* for reading the plaintext data.
*/
- GUARD(s2n_stuffer_reread(&conn->in));
- GUARD(s2n_stuffer_reread(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->in));
+ POSIX_GUARD(s2n_stuffer_reread(&conn->header_in));
/* Truncate and wipe the MAC and any padding */
- GUARD(s2n_stuffer_wipe_n(&conn->in, s2n_stuffer_data_available(&conn->in) - payload_length));
+ POSIX_GUARD(s2n_stuffer_wipe_n(&conn->in, s2n_stuffer_data_available(&conn->in) - payload_length));
conn->in_status = PLAINTEXT;
return 0;
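
The stream-cipher path compares the received MAC against a locally computed digest via s2n_hmac_digest_verify. A sketch of the comparison style such a helper is generally expected to use, accumulating differences instead of exiting early so the comparison time does not depend on which byte differs; this is an assumption about the helper's internals, not something shown in this diff:

/* Hedged sketch of a constant-time digest comparison. */
#include <stdint.h>
#include <stddef.h>

static int constant_time_equals(const uint8_t *a, const uint8_t *b, size_t len)
{
    uint8_t diff = 0;
    for (size_t i = 0; i < len; i++) {
        diff |= a[i] ^ b[i];
    }
    return diff == 0;
}
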
diff --git a/contrib/restricted/aws/s2n/tls/s2n_record_write.c b/contrib/restricted/aws/s2n/tls/s2n_record_write.c
index df28a9d58e..14cda738ff 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_record_write.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_record_write.c
@@ -38,8 +38,8 @@ extern uint8_t s2n_unknown_protocol_version;
/* How much overhead does the IV, MAC, TAG and padding bytes introduce ? */
static S2N_RESULT s2n_tls_record_overhead(struct s2n_connection *conn, uint16_t *out)
{
- ENSURE_REF(conn);
- ENSURE_MUT(out);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_MUT(out);
struct s2n_crypto_parameters *active = conn->server;
if (conn->mode == S2N_CLIENT) {
@@ -47,7 +47,7 @@ static S2N_RESULT s2n_tls_record_overhead(struct s2n_connection *conn, uint16_t
}
uint8_t extra;
- GUARD_AS_RESULT(s2n_hmac_digest_size(active->cipher_suite->record_alg->hmac_alg, &extra));
+ RESULT_GUARD_POSIX(s2n_hmac_digest_size(active->cipher_suite->record_alg->hmac_alg, &extra));
if (active->cipher_suite->record_alg->cipher->type == S2N_CBC) {
/* Subtract one for the padding length byte */
@@ -73,25 +73,41 @@ static S2N_RESULT s2n_tls_record_overhead(struct s2n_connection *conn, uint16_t
*/
S2N_RESULT s2n_record_max_write_payload_size(struct s2n_connection *conn, uint16_t *max_fragment_size)
{
- ENSURE_REF(conn);
- ENSURE_MUT(max_fragment_size);
- ENSURE(conn->max_outgoing_fragment_length > 0, S2N_ERR_FRAGMENT_LENGTH_TOO_SMALL);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_MUT(max_fragment_size);
+ RESULT_ENSURE(conn->max_outgoing_fragment_length > 0, S2N_ERR_FRAGMENT_LENGTH_TOO_SMALL);
*max_fragment_size = MIN(conn->max_outgoing_fragment_length, S2N_TLS_MAXIMUM_FRAGMENT_LENGTH);
return S2N_RESULT_OK;
}
+S2N_RESULT s2n_record_max_write_size(struct s2n_connection *conn, uint16_t max_fragment_size, uint16_t *max_record_size)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_MUT(max_record_size);
+
+ if(!IS_NEGOTIATED(conn)) {
+ *max_record_size = S2N_TLS_MAX_RECORD_LEN_FOR(max_fragment_size);
+ } else if (conn->actual_protocol_version < S2N_TLS13) {
+ *max_record_size = S2N_TLS12_MAX_RECORD_LEN_FOR(max_fragment_size);
+ } else {
+ *max_record_size = S2N_TLS13_MAX_RECORD_LEN_FOR(max_fragment_size);
+ }
+ return S2N_RESULT_OK;
+}
+
/* Find the largest size that will fit within an ethernet frame for a "small" payload */
S2N_RESULT s2n_record_min_write_payload_size(struct s2n_connection *conn, uint16_t *payload_size)
{
- ENSURE_REF(conn);
- ENSURE_MUT(payload_size);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_MUT(payload_size);
+
/* remove ethernet, TCP/IP and TLS header overheads */
const uint16_t min_outgoing_fragment_length = ETH_MTU - (conn->ipv6 ? IP_V6_HEADER_LENGTH : IP_V4_HEADER_LENGTH)
- TCP_HEADER_LENGTH - TCP_OPTIONS_LENGTH - S2N_TLS_RECORD_HEADER_LENGTH;
- ENSURE(min_outgoing_fragment_length <= S2N_TLS_MAXIMUM_FRAGMENT_LENGTH, S2N_ERR_FRAGMENT_LENGTH_TOO_LARGE);
+ RESULT_ENSURE(min_outgoing_fragment_length <= S2N_TLS_MAXIMUM_FRAGMENT_LENGTH, S2N_ERR_FRAGMENT_LENGTH_TOO_LARGE);
uint16_t size = min_outgoing_fragment_length;
const struct s2n_crypto_parameters *active = conn->mode == S2N_CLIENT ? conn->client : conn->server;
@@ -107,14 +123,20 @@ S2N_RESULT s2n_record_min_write_payload_size(struct s2n_connection *conn, uint16
size -= 1;
}
+ /* If TLS1.3, remove content type */
+ if (conn->actual_protocol_version >= S2N_TLS13) {
+ RESULT_ENSURE(size > S2N_TLS_CONTENT_TYPE_LENGTH, S2N_ERR_FRAGMENT_LENGTH_TOO_SMALL);
+ size -= S2N_TLS_CONTENT_TYPE_LENGTH;
+ }
+
/* subtract overheads of a TLS record */
uint16_t overhead = 0;
- GUARD_RESULT(s2n_tls_record_overhead(conn, &overhead));
- ENSURE(size > overhead, S2N_ERR_FRAGMENT_LENGTH_TOO_SMALL);
+ RESULT_GUARD(s2n_tls_record_overhead(conn, &overhead));
+ RESULT_ENSURE(size > overhead, S2N_ERR_FRAGMENT_LENGTH_TOO_SMALL);
size -= overhead;
- ENSURE(size > 0, S2N_ERR_FRAGMENT_LENGTH_TOO_SMALL);
- ENSURE(size <= ETH_MTU, S2N_ERR_FRAGMENT_LENGTH_TOO_LARGE);
+ RESULT_ENSURE(size > 0, S2N_ERR_FRAGMENT_LENGTH_TOO_SMALL);
+ RESULT_ENSURE(size <= ETH_MTU, S2N_ERR_FRAGMENT_LENGTH_TOO_LARGE);
*payload_size = size;
@@ -124,7 +146,8 @@ S2N_RESULT s2n_record_min_write_payload_size(struct s2n_connection *conn, uint16
int s2n_record_write_protocol_version(struct s2n_connection *conn)
{
uint8_t record_protocol_version = conn->actual_protocol_version;
- if (conn->server_protocol_version == s2n_unknown_protocol_version) {
+ if (conn->server_protocol_version == s2n_unknown_protocol_version
+ && conn->early_data_state != S2N_EARLY_DATA_REQUESTED) {
/* Some legacy TLS implementations can't handle records with protocol version higher than TLS1.0.
* To provide maximum compatibility, send record version as TLS1.0 if server protocol version isn't
* established yet, which happens only during ClientHello message. Note, this has no effect on
@@ -141,7 +164,7 @@ int s2n_record_write_protocol_version(struct s2n_connection *conn)
protocol_version[0] = record_protocol_version / 10;
protocol_version[1] = record_protocol_version % 10;
- GUARD(s2n_stuffer_write_bytes(&conn->out, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->out, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
return 0;
}
@@ -155,34 +178,34 @@ static inline int s2n_record_encrypt(
struct s2n_blob *en,
uint8_t *implicit_iv, uint16_t block_size)
{
- notnull_check(en->data);
+ POSIX_ENSURE_REF(en->data);
switch (cipher_suite->record_alg->cipher->type) {
case S2N_STREAM:
- GUARD(cipher_suite->record_alg->cipher->io.stream.encrypt(session_key, en, en));
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.stream.encrypt(session_key, en, en));
break;
case S2N_CBC:
- GUARD(cipher_suite->record_alg->cipher->io.cbc.encrypt(session_key, iv, en, en));
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.cbc.encrypt(session_key, iv, en, en));
/* Copy the last encrypted block to be the next IV */
if (conn->actual_protocol_version < S2N_TLS11) {
- gte_check(en->size, block_size);
- memcpy_check(implicit_iv, en->data + en->size - block_size, block_size);
+ POSIX_ENSURE_GTE(en->size, block_size);
+ POSIX_CHECKED_MEMCPY(implicit_iv, en->data + en->size - block_size, block_size);
}
break;
case S2N_AEAD:
- GUARD(cipher_suite->record_alg->cipher->io.aead.encrypt(session_key, iv, aad, en, en));
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.aead.encrypt(session_key, iv, aad, en, en));
break;
case S2N_COMPOSITE:
/* This will: compute mac, append padding, append padding length, and encrypt */
- GUARD(cipher_suite->record_alg->cipher->io.comp.encrypt(session_key, iv, en, en));
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.comp.encrypt(session_key, iv, en, en));
/* Copy the last encrypted block to be the next IV */
- gte_check(en->size, block_size);
- memcpy_check(implicit_iv, en->data + en->size - block_size, block_size);
+ POSIX_ENSURE_GTE(en->size, block_size);
+ POSIX_CHECKED_MEMCPY(implicit_iv, en->data + en->size - block_size, block_size);
break;
default:
- S2N_ERROR(S2N_ERR_CIPHER_TYPE);
+ POSIX_BAIL(S2N_ERR_CIPHER_TYPE);
break;
}
@@ -218,23 +241,29 @@ int s2n_record_writev(struct s2n_connection *conn, uint8_t content_type, const s
implicit_iv = conn->client->client_implicit_iv;
}
+ /* The NULL stream cipher MUST NEVER be used for ApplicationData.
+ * Writing ApplicationData unencrypted defeats the purpose of TLS. */
+ if (cipher_suite->record_alg->cipher == &s2n_null_cipher) {
+ POSIX_ENSURE(content_type != TLS_APPLICATION_DATA, S2N_ERR_ENCRYPT);
+ }
+
const int is_tls13_record = cipher_suite->record_alg->flags & S2N_TLS13_RECORD_AEAD_NONCE;
s2n_stack_blob(aad, is_tls13_record ? S2N_TLS13_AAD_LEN : S2N_TLS_MAX_AAD_LEN, S2N_TLS_MAX_AAD_LEN);
S2N_ERROR_IF(s2n_stuffer_data_available(&conn->out), S2N_ERR_RECORD_STUFFER_NEEDS_DRAINING);
uint8_t mac_digest_size;
- GUARD(s2n_hmac_digest_size(mac->alg, &mac_digest_size));
+ POSIX_GUARD(s2n_hmac_digest_size(mac->alg, &mac_digest_size));
/* Before we do anything, we need to figure out what the length of the
* fragment is going to be.
*/
uint16_t max_write_payload_size = 0;
- GUARD_AS_POSIX(s2n_record_max_write_payload_size(conn, &max_write_payload_size));
+ POSIX_GUARD_RESULT(s2n_record_max_write_payload_size(conn, &max_write_payload_size));
const uint16_t data_bytes_to_take = MIN(to_write, max_write_payload_size);
uint16_t extra = 0;
- GUARD_AS_POSIX(s2n_tls_record_overhead(conn, &extra));
+ POSIX_GUARD_RESULT(s2n_tls_record_overhead(conn, &extra));
/* If we have padding to worry about, figure that out too */
if (cipher_suite->record_alg->cipher->type == S2N_CBC) {
@@ -247,25 +276,40 @@ int s2n_record_writev(struct s2n_connection *conn, uint8_t content_type, const s
}
/* Start the MAC with the sequence number */
- GUARD(s2n_hmac_update(mac, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
-
- GUARD(s2n_stuffer_resize_if_empty(&conn->out, S2N_LARGE_RECORD_LENGTH));
+ POSIX_GUARD(s2n_hmac_update(mac, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+
+ if (s2n_stuffer_is_freed(&conn->out)) {
+ /* If the output buffer has not been allocated yet, allocate enough memory to hold
+ * a record with the local maximum fragment length. Because this only occurs if the
+ * output buffer has not been allocated, it does NOT resize existing buffers.
+ *
+ * The maximum fragment length is:
+ * 1) The local default configured for new connections
+ * 2) The local value set by the user via s2n_connection_prefer_throughput()
+ * or s2n_connection_prefer_low_latency()
+ * 3) On the server, the minimum of the local value and the value negotiated with the
+ * client via the max_fragment_length extension
+ */
+ uint16_t max_wire_record_size = 0;
+ POSIX_GUARD_RESULT(s2n_record_max_write_size(conn, max_write_payload_size, &max_wire_record_size));
+ POSIX_GUARD(s2n_stuffer_growable_alloc(&conn->out, max_wire_record_size));
+ }
/* Now that we know the length, start writing the record */
- GUARD(s2n_stuffer_write_uint8(&conn->out, is_tls13_record ?
+ POSIX_GUARD(s2n_stuffer_write_uint8(&conn->out, is_tls13_record ?
/* tls 1.3 opaque type */ TLS_APPLICATION_DATA :
/* actual content_type */ content_type ));
- GUARD(s2n_record_write_protocol_version(conn));
+ POSIX_GUARD(s2n_record_write_protocol_version(conn));
/* First write a header that has the payload length, this is for the MAC */
- GUARD(s2n_stuffer_write_uint16(&conn->out, data_bytes_to_take));
+ POSIX_GUARD(s2n_stuffer_write_uint16(&conn->out, data_bytes_to_take));
if (conn->actual_protocol_version > S2N_SSLv3) {
- GUARD(s2n_hmac_update(mac, conn->out.blob.data, S2N_TLS_RECORD_HEADER_LENGTH));
+ POSIX_GUARD(s2n_hmac_update(mac, conn->out.blob.data, S2N_TLS_RECORD_HEADER_LENGTH));
} else {
/* SSLv3 doesn't include the protocol version in the MAC */
- GUARD(s2n_hmac_update(mac, conn->out.blob.data, 1));
- GUARD(s2n_hmac_update(mac, conn->out.blob.data + 3, 2));
+ POSIX_GUARD(s2n_hmac_update(mac, conn->out.blob.data, 1));
+ POSIX_GUARD(s2n_hmac_update(mac, conn->out.blob.data + 3, 2));
}
/* Compute non-payload parts of the MAC(seq num, type, proto vers, fragment length) for composite ciphers.
@@ -280,14 +324,14 @@ int s2n_record_writev(struct s2n_connection *conn, uint8_t content_type, const s
/* Outputs number of extra bytes required for MAC and padding */
int pad_and_mac_len;
- GUARD(cipher_suite->record_alg->cipher->io.comp.initial_hmac(session_key, sequence_number, content_type, conn->actual_protocol_version,
+ POSIX_GUARD(cipher_suite->record_alg->cipher->io.comp.initial_hmac(session_key, sequence_number, content_type, conn->actual_protocol_version,
payload_and_eiv_len, &pad_and_mac_len));
extra += pad_and_mac_len;
}
/* TLS 1.3 protected record occupies one extra byte for content type */
if (is_tls13_record) {
- extra += TLS13_CONTENT_TYPE_LENGTH;
+ extra += S2N_TLS_CONTENT_TYPE_LENGTH;
}
/* Rewrite the length to be the actual fragment length */
@@ -295,48 +339,45 @@ int s2n_record_writev(struct s2n_connection *conn, uint8_t content_type, const s
/* ensure actual_fragment_length + S2N_TLS_RECORD_HEADER_LENGTH <= max record length */
const uint16_t max_record_length = is_tls13_record ? S2N_TLS13_MAXIMUM_RECORD_LENGTH : S2N_TLS_MAXIMUM_RECORD_LENGTH;
S2N_ERROR_IF(actual_fragment_length + S2N_TLS_RECORD_HEADER_LENGTH > max_record_length, S2N_ERR_RECORD_LENGTH_TOO_LARGE);
- GUARD(s2n_stuffer_wipe_n(&conn->out, 2));
- GUARD(s2n_stuffer_write_uint16(&conn->out, actual_fragment_length));
+ POSIX_GUARD(s2n_stuffer_wipe_n(&conn->out, 2));
+ POSIX_GUARD(s2n_stuffer_write_uint16(&conn->out, actual_fragment_length));
/* If we're AEAD, write the sequence number as an IV, and generate the AAD */
if (cipher_suite->record_alg->cipher->type == S2N_AEAD) {
struct s2n_stuffer iv_stuffer = {0};
s2n_blob_init(&iv, aad_iv, sizeof(aad_iv));
- GUARD(s2n_stuffer_init(&iv_stuffer, &iv));
+ POSIX_GUARD(s2n_stuffer_init(&iv_stuffer, &iv));
if (cipher_suite->record_alg->flags & S2N_TLS12_AES_GCM_AEAD_NONCE) {
/* Partially explicit nonce. See RFC 5288 Section 3 */
- GUARD(s2n_stuffer_write_bytes(&conn->out, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
- GUARD(s2n_stuffer_write_bytes(&iv_stuffer, implicit_iv, cipher_suite->record_alg->cipher->io.aead.fixed_iv_size));
- GUARD(s2n_stuffer_write_bytes(&iv_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->out, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&iv_stuffer, implicit_iv, cipher_suite->record_alg->cipher->io.aead.fixed_iv_size));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&iv_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
} else if (cipher_suite->record_alg->flags & S2N_TLS12_CHACHA_POLY_AEAD_NONCE || is_tls13_record) {
/* Fully implicit nonce. See RFC7905 Section 2 */
uint8_t four_zeroes[4] = { 0 };
- GUARD(s2n_stuffer_write_bytes(&iv_stuffer, four_zeroes, 4));
- GUARD(s2n_stuffer_write_bytes(&iv_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&iv_stuffer, four_zeroes, 4));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&iv_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN));
for(int i = 0; i < cipher_suite->record_alg->cipher->io.aead.fixed_iv_size; i++) {
aad_iv[i] = aad_iv[i] ^ implicit_iv[i];
}
} else {
- S2N_ERROR(S2N_ERR_INVALID_NONCE_TYPE);
+ POSIX_BAIL(S2N_ERR_INVALID_NONCE_TYPE);
}
/* Set the IV size to the amount of data written */
iv.size = s2n_stuffer_data_available(&iv_stuffer);
-
- struct s2n_stuffer ad_stuffer = {0};
- GUARD(s2n_stuffer_init(&ad_stuffer, &aad));
if (is_tls13_record) {
- GUARD_AS_POSIX(s2n_tls13_aead_aad_init(data_bytes_to_take + TLS13_CONTENT_TYPE_LENGTH, cipher_suite->record_alg->cipher->io.aead.tag_size, &ad_stuffer));
+ POSIX_GUARD_RESULT(s2n_tls13_aead_aad_init(data_bytes_to_take + S2N_TLS_CONTENT_TYPE_LENGTH, cipher_suite->record_alg->cipher->io.aead.tag_size, &aad));
} else {
- GUARD_AS_POSIX(s2n_aead_aad_init(conn, sequence_number, content_type, data_bytes_to_take, &ad_stuffer));
+ POSIX_GUARD_RESULT(s2n_aead_aad_init(conn, sequence_number, content_type, data_bytes_to_take, &aad));
}
} else if (cipher_suite->record_alg->cipher->type == S2N_CBC || cipher_suite->record_alg->cipher->type == S2N_COMPOSITE) {
s2n_blob_init(&iv, implicit_iv, block_size);
/* For TLS1.1/1.2; write the IV with random data */
if (conn->actual_protocol_version > S2N_TLS10) {
- GUARD_AS_POSIX(s2n_get_public_random_data(&iv));
+ POSIX_GUARD_RESULT(s2n_get_public_random_data(&iv));
if (cipher_suite->record_alg->cipher->type == S2N_COMPOSITE) {
/* Write a separate random block to the record. This will be used along with the previously generated
* iv blob to generate the final explicit_iv for this record.
@@ -353,37 +394,37 @@ int s2n_record_writev(struct s2n_connection *conn, uint8_t content_type, const s
*/
struct s2n_blob explicit_iv_placeholder;
uint8_t zero_block[S2N_TLS_MAX_IV_LEN] = { 0 };
- GUARD(s2n_blob_init(&explicit_iv_placeholder, zero_block, block_size));
- GUARD_AS_POSIX(s2n_get_public_random_data(&explicit_iv_placeholder));
- GUARD(s2n_stuffer_write(&conn->out, &explicit_iv_placeholder));
+ POSIX_GUARD(s2n_blob_init(&explicit_iv_placeholder, zero_block, block_size));
+ POSIX_GUARD_RESULT(s2n_get_public_random_data(&explicit_iv_placeholder));
+ POSIX_GUARD(s2n_stuffer_write(&conn->out, &explicit_iv_placeholder));
} else {
/* We can write the explicit IV directly to the record for non composite CBC because
* s2n starts AES *after* the explicit IV.
*/
- GUARD(s2n_stuffer_write(&conn->out, &iv));
+ POSIX_GUARD(s2n_stuffer_write(&conn->out, &iv));
}
}
}
/* We are done with this sequence number, so we can increment it */
struct s2n_blob seq = {.data = sequence_number,.size = S2N_TLS_SEQUENCE_NUM_LEN };
- GUARD(s2n_increment_sequence_number(&seq));
+ POSIX_GUARD(s2n_increment_sequence_number(&seq));
/* Write the plaintext data */
- GUARD(s2n_stuffer_writev_bytes(&conn->out, in, in_count, offs, data_bytes_to_take));
+ POSIX_GUARD(s2n_stuffer_writev_bytes(&conn->out, in, in_count, offs, data_bytes_to_take));
void *orig_write_ptr = conn->out.blob.data + conn->out.write_cursor - data_bytes_to_take;
- GUARD(s2n_hmac_update(mac, orig_write_ptr, data_bytes_to_take));
+ POSIX_GUARD(s2n_hmac_update(mac, orig_write_ptr, data_bytes_to_take));
/* Write the digest */
uint8_t *digest = s2n_stuffer_raw_write(&conn->out, mac_digest_size);
- notnull_check(digest);
+ POSIX_ENSURE_REF(digest);
- GUARD(s2n_hmac_digest(mac, digest, mac_digest_size));
- GUARD(s2n_hmac_reset(mac));
+ POSIX_GUARD(s2n_hmac_digest(mac, digest, mac_digest_size));
+ POSIX_GUARD(s2n_hmac_reset(mac));
/* Write content type for TLS 1.3 record (RFC 8446 Section 5.2) */
if (is_tls13_record) {
- GUARD(s2n_stuffer_write_uint8(&conn->out, content_type));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&conn->out, content_type));
}
if (cipher_suite->record_alg->cipher->type == S2N_CBC) {
@@ -391,30 +432,30 @@ int s2n_record_writev(struct s2n_connection *conn, uint8_t content_type, const s
* include an extra padding length byte, also with the value 'p'.
*/
for (int i = 0; i <= padding; i++) {
- GUARD(s2n_stuffer_write_uint8(&conn->out, padding));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&conn->out, padding));
}
}
/* Rewind to rewrite/encrypt the packet */
- GUARD(s2n_stuffer_rewrite(&conn->out));
+ POSIX_GUARD(s2n_stuffer_rewrite(&conn->out));
/* Skip the header */
- GUARD(s2n_stuffer_skip_write(&conn->out, S2N_TLS_RECORD_HEADER_LENGTH));
+ POSIX_GUARD(s2n_stuffer_skip_write(&conn->out, S2N_TLS_RECORD_HEADER_LENGTH));
uint16_t encrypted_length = data_bytes_to_take + mac_digest_size;
switch (cipher_suite->record_alg->cipher->type) {
case S2N_AEAD:
- GUARD(s2n_stuffer_skip_write(&conn->out, cipher_suite->record_alg->cipher->io.aead.record_iv_size));
+ POSIX_GUARD(s2n_stuffer_skip_write(&conn->out, cipher_suite->record_alg->cipher->io.aead.record_iv_size));
encrypted_length += cipher_suite->record_alg->cipher->io.aead.tag_size;
if (is_tls13_record) {
/* one extra byte for content type */
- encrypted_length += TLS13_CONTENT_TYPE_LENGTH;
+ encrypted_length += S2N_TLS_CONTENT_TYPE_LENGTH;
}
break;
case S2N_CBC:
if (conn->actual_protocol_version > S2N_TLS10) {
/* Leave the IV alone and unencrypted */
- GUARD(s2n_stuffer_skip_write(&conn->out, iv.size));
+ POSIX_GUARD(s2n_stuffer_skip_write(&conn->out, iv.size));
}
/* Encrypt the padding and the padding length byte too */
encrypted_length += padding + 1;
@@ -434,7 +475,7 @@ int s2n_record_writev(struct s2n_connection *conn, uint8_t content_type, const s
/* Do the encryption */
struct s2n_blob en = { .size = encrypted_length, .data = s2n_stuffer_raw_write(&conn->out, encrypted_length) };
- GUARD(s2n_record_encrypt(conn, cipher_suite, session_key, &iv, &aad, &en, implicit_iv, block_size));
+ POSIX_GUARD(s2n_record_encrypt(conn, cipher_suite, session_key, &iv, &aad, &en, implicit_iv, block_size));
if (conn->actual_protocol_version == S2N_TLS13 && content_type == TLS_CHANGE_CIPHER_SPEC) {
conn->client = current_client_crypto;
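
The write path now sizes conn->out lazily: on first use it allocates room for one maximum-size wire record at the current fragment limit, choosing the TLS1.2 bound until a protocol version has been negotiated (per s2n_record_max_write_size above). A sketch of that sizing decision:

/* Hedged sketch of the maximum wire-record sizing used for the lazy allocation. */
#include <stdint.h>
#include <stdbool.h>

#define EXAMPLE_HEADER_LEN 5

static uint16_t example_max_record_size(bool negotiated, bool is_tls13, uint16_t max_fragment_size)
{
    if (!negotiated || !is_tls13) {
        /* Protocol unknown or TLS1.2: assume the larger 1024-byte expansion. */
        return max_fragment_size + 1024 + EXAMPLE_HEADER_LEN;
    }
    /* TLS1.3: inner content type byte + at most 255 bytes of AEAD expansion. */
    return max_fragment_size + 1 + 255 + EXAMPLE_HEADER_LEN;
}
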
diff --git a/contrib/restricted/aws/s2n/tls/s2n_recv.c b/contrib/restricted/aws/s2n/tls/s2n_recv.c
index 8aa34e6d06..3d0907573f 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_recv.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_recv.c
@@ -20,7 +20,7 @@
#include <unistd.h>
#include <errno.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
@@ -47,12 +47,12 @@ S2N_RESULT s2n_read_in_bytes(struct s2n_connection *conn, struct s2n_stuffer *ou
int r = s2n_connection_recv_stuffer(output, conn, remaining);
if (r == 0) {
conn->closed = 1;
- BAIL(S2N_ERR_CLOSED);
+ RESULT_BAIL(S2N_ERR_CLOSED);
} else if (r < 0) {
if (errno == EWOULDBLOCK || errno == EAGAIN) {
- BAIL(S2N_ERR_IO_BLOCKED);
+ RESULT_BAIL(S2N_ERR_IO_BLOCKED);
}
- BAIL(S2N_ERR_IO);
+ RESULT_BAIL(S2N_ERR_IO);
}
conn->wire_bytes_in += r;
}
@@ -70,10 +70,10 @@ int s2n_read_full_record(struct s2n_connection *conn, uint8_t * record_type, int
*record_type = TLS_APPLICATION_DATA;
return S2N_SUCCESS;
}
- GUARD(s2n_stuffer_resize_if_empty(&conn->in, S2N_LARGE_FRAGMENT_LENGTH));
+ POSIX_GUARD(s2n_stuffer_resize_if_empty(&conn->in, S2N_LARGE_FRAGMENT_LENGTH));
/* Read the record until we at least have a header */
- GUARD_AS_POSIX(s2n_read_in_bytes(conn, &conn->header_in, S2N_TLS_RECORD_HEADER_LENGTH));
+ POSIX_GUARD_RESULT(s2n_read_in_bytes(conn, &conn->header_in, S2N_TLS_RECORD_HEADER_LENGTH));
uint16_t fragment_length;
@@ -82,28 +82,25 @@ int s2n_read_full_record(struct s2n_connection *conn, uint8_t * record_type, int
conn->header_in.blob.data[0] &= 0x7f;
*isSSLv2 = 1;
- if (s2n_sslv2_record_header_parse(conn, record_type, &conn->client_protocol_version, &fragment_length) < 0) {
- GUARD(s2n_connection_kill(conn));
- S2N_ERROR_PRESERVE_ERRNO();
- }
+ WITH_ERROR_BLINDING(conn, POSIX_GUARD(
+ s2n_sslv2_record_header_parse(conn, record_type, &conn->client_protocol_version, &fragment_length)));
} else {
- if (s2n_record_header_parse(conn, record_type, &fragment_length) < 0) {
- GUARD(s2n_connection_kill(conn));
- S2N_ERROR_PRESERVE_ERRNO();
- }
+ WITH_ERROR_BLINDING(conn, POSIX_GUARD(
+ s2n_record_header_parse(conn, record_type, &fragment_length)));
}
/* Read enough to have the whole record */
- GUARD_AS_POSIX(s2n_read_in_bytes(conn, &conn->in, fragment_length));
+ POSIX_GUARD_RESULT(s2n_read_in_bytes(conn, &conn->in, fragment_length));
if (*isSSLv2) {
return 0;
}
/* Decrypt and parse the record */
- if (s2n_record_parse(conn) < 0) {
- GUARD(s2n_connection_kill(conn));
- S2N_ERROR_PRESERVE_ERRNO();
+ if (s2n_early_data_is_trial_decryption_allowed(conn, *record_type)) {
+ POSIX_ENSURE(s2n_record_parse(conn) >= S2N_SUCCESS, S2N_ERR_EARLY_DATA_TRIAL_DECRYPT);
+ } else {
+ WITH_ERROR_BLINDING(conn, POSIX_GUARD(s2n_record_parse(conn)));
}
/* In TLS 1.3, encrypted handshake records would appear to be of record type
@@ -111,7 +108,7 @@ int s2n_read_full_record(struct s2n_connection *conn, uint8_t * record_type, int
* is decrypted.
*/
if (conn->actual_protocol_version == S2N_TLS13 && *record_type == TLS_APPLICATION_DATA) {
- GUARD(s2n_tls13_parse_record_type(&conn->in, record_type));
+ POSIX_GUARD(s2n_tls13_parse_record_type(&conn->in, record_type));
}
return 0;
@@ -127,7 +124,8 @@ ssize_t s2n_recv_impl(struct s2n_connection * conn, void *buf, ssize_t size, s2n
}
*blocked = S2N_BLOCKED_ON_READ;
- S2N_ERROR_IF(conn->config->quic_enabled, S2N_ERR_UNSUPPORTED_WITH_QUIC);
+ POSIX_ENSURE(!s2n_connection_is_quic_enabled(conn), S2N_ERR_UNSUPPORTED_WITH_QUIC);
+ POSIX_GUARD_RESULT(s2n_early_data_validate_recv(conn));
while (size && !conn->closed) {
int isSSLv2 = 0;
@@ -163,22 +161,22 @@ ssize_t s2n_recv_impl(struct s2n_connection * conn, void *buf, ssize_t size, s2n
switch (record_type)
{
case TLS_ALERT:
- GUARD(s2n_process_alert_fragment(conn));
- GUARD(s2n_flush(conn, blocked));
+ POSIX_GUARD(s2n_process_alert_fragment(conn));
+ POSIX_GUARD(s2n_flush(conn, blocked));
break;
case TLS_HANDSHAKE:
- GUARD(s2n_post_handshake_recv(conn));
+ WITH_ERROR_BLINDING(conn, POSIX_GUARD(s2n_post_handshake_recv(conn)));
break;
}
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->in));
conn->in_status = ENCRYPTED;
continue;
}
out.size = MIN(size, s2n_stuffer_data_available(&conn->in));
- GUARD(s2n_stuffer_erase_and_read(&conn->in, &out));
+ POSIX_GUARD(s2n_stuffer_erase_and_read(&conn->in, &out));
bytes_read += out.size;
out.data += out.size;
@@ -186,8 +184,8 @@ ssize_t s2n_recv_impl(struct s2n_connection * conn, void *buf, ssize_t size, s2n
/* Are we ready for more encrypted data? */
if (s2n_stuffer_data_available(&conn->in) == 0) {
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->in));
conn->in_status = ENCRYPTED;
}
@@ -201,12 +199,13 @@ ssize_t s2n_recv_impl(struct s2n_connection * conn, void *buf, ssize_t size, s2n
*blocked = S2N_NOT_BLOCKED;
}
+ POSIX_GUARD_RESULT(s2n_early_data_record_bytes(conn, bytes_read));
return bytes_read;
}
ssize_t s2n_recv(struct s2n_connection * conn, void *buf, ssize_t size, s2n_blocked_status * blocked)
{
- ENSURE_POSIX(!conn->recv_in_use, S2N_ERR_REENTRANCY);
+ POSIX_ENSURE(!conn->recv_in_use, S2N_ERR_REENTRANCY);
conn->recv_in_use = true;
ssize_t result = s2n_recv_impl(conn, buf, size, blocked);
conn->recv_in_use = false;
@@ -223,14 +222,14 @@ int s2n_recv_close_notify(struct s2n_connection *conn, s2n_blocked_status * bloc
int isSSLv2;
*blocked = S2N_BLOCKED_ON_READ;
- GUARD(s2n_read_full_record(conn, &record_type, &isSSLv2));
+ POSIX_GUARD(s2n_read_full_record(conn, &record_type, &isSSLv2));
S2N_ERROR_IF(isSSLv2, S2N_ERR_BAD_MESSAGE);
S2N_ERROR_IF(record_type != TLS_ALERT, S2N_ERR_SHUTDOWN_RECORD_TYPE);
/* Only succeeds for an incoming close_notify alert */
- GUARD(s2n_process_alert_fragment(conn));
+ POSIX_GUARD(s2n_process_alert_fragment(conn));
*blocked = S2N_NOT_BLOCKED;
return 0;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_resume.c b/contrib/restricted/aws/s2n/tls/s2n_resume.c
index f4571bf042..31ba8e97a0 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_resume.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_resume.c
@@ -13,8 +13,9 @@
* permissions and limitations under the License.
*/
#include <math.h>
+#include <sys/param.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
#include "stuffer/s2n_stuffer.h"
@@ -33,128 +34,331 @@ int s2n_allowed_to_cache_connection(struct s2n_connection *conn)
{
/* We're unable to cache connections with a Client Cert since we currently don't serialize the Client Cert,
* which means that callers won't have access to the Client's Cert if the connection is resumed. */
- if (s2n_connection_is_client_auth_enabled(conn) > 0) {
+ if (s2n_connection_is_client_auth_enabled(conn)) {
return 0;
}
struct s2n_config *config = conn->config;
- notnull_check(config);
+ POSIX_ENSURE_REF(config);
return config->use_session_cache;
}
-static int s2n_serialize_resumption_state(struct s2n_connection *conn, struct s2n_stuffer *to)
+static int s2n_tls12_serialize_resumption_state(struct s2n_connection *conn, struct s2n_stuffer *to)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(to);
+
uint64_t now;
- S2N_ERROR_IF(s2n_stuffer_space_remaining(to) < S2N_STATE_SIZE_IN_BYTES, S2N_ERR_STUFFER_IS_FULL);
+ S2N_ERROR_IF(s2n_stuffer_space_remaining(to) < S2N_TLS12_STATE_SIZE_IN_BYTES, S2N_ERR_STUFFER_IS_FULL);
/* Get the time */
- GUARD(conn->config->wall_clock(conn->config->sys_clock_ctx, &now));
+ POSIX_GUARD(conn->config->wall_clock(conn->config->sys_clock_ctx, &now));
/* Write the entry */
- GUARD(s2n_stuffer_write_uint8(to, S2N_SERIALIZED_FORMAT_VERSION));
- GUARD(s2n_stuffer_write_uint8(to, conn->actual_protocol_version));
- GUARD(s2n_stuffer_write_bytes(to, conn->secure.cipher_suite->iana_value, S2N_TLS_CIPHER_SUITE_LEN));
- GUARD(s2n_stuffer_write_uint64(to, now));
- GUARD(s2n_stuffer_write_bytes(to, conn->secure.master_secret, S2N_TLS_SECRET_LEN));
+ POSIX_GUARD(s2n_stuffer_write_uint8(to, S2N_SERIALIZED_FORMAT_TLS12_V3));
+ POSIX_GUARD(s2n_stuffer_write_uint8(to, conn->actual_protocol_version));
+ POSIX_GUARD(s2n_stuffer_write_bytes(to, conn->secure.cipher_suite->iana_value, S2N_TLS_CIPHER_SUITE_LEN));
+ POSIX_GUARD(s2n_stuffer_write_uint64(to, now));
+ POSIX_GUARD(s2n_stuffer_write_bytes(to, conn->secrets.tls12.master_secret, S2N_TLS_SECRET_LEN));
+ POSIX_GUARD(s2n_stuffer_write_uint8(to, conn->ems_negotiated));
- return 0;
+ return S2N_SUCCESS;
}
-static int s2n_deserialize_resumption_state(struct s2n_connection *conn, struct s2n_stuffer *from)
+static S2N_RESULT s2n_tls13_serialize_keying_material_expiration(struct s2n_connection *conn,
+ uint64_t now, struct s2n_stuffer *out)
{
- uint8_t format;
- uint8_t protocol_version;
- uint8_t cipher_suite[S2N_TLS_CIPHER_SUITE_LEN];
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(out);
+
+ if (conn->mode != S2N_SERVER) {
+ return S2N_RESULT_OK;
+ }
+
+ uint64_t expiration_timestamp = now + (conn->server_keying_material_lifetime * (uint64_t) ONE_SEC_IN_NANOS);
+
+ struct s2n_psk *chosen_psk = conn->psk_params.chosen_psk;
+ if (chosen_psk && chosen_psk->type == S2N_PSK_TYPE_RESUMPTION) {
+ expiration_timestamp = MIN(chosen_psk->keying_material_expiration, expiration_timestamp);
+ }
- S2N_ERROR_IF(s2n_stuffer_data_available(from) < S2N_STATE_SIZE_IN_BYTES, S2N_ERR_STUFFER_OUT_OF_DATA);
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint64(out, expiration_timestamp));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_tls13_serialize_resumption_state(struct s2n_connection *conn, struct s2n_stuffer *out)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(out);
- GUARD(s2n_stuffer_read_uint8(from, &format));
- S2N_ERROR_IF(format != S2N_SERIALIZED_FORMAT_VERSION, S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
+ uint64_t current_time = 0;
+ struct s2n_ticket_fields *ticket_fields = &conn->tls13_ticket_fields;
- GUARD(s2n_stuffer_read_uint8(from, &protocol_version));
+ /* Get the time */
+ RESULT_GUARD_POSIX(conn->config->wall_clock(conn->config->sys_clock_ctx, &current_time));
+
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(out, S2N_SERIALIZED_FORMAT_TLS13_V1));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(out, conn->actual_protocol_version));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_bytes(out, conn->secure.cipher_suite->iana_value, S2N_TLS_CIPHER_SUITE_LEN));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint64(out, current_time));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint32(out, ticket_fields->ticket_age_add));
+ RESULT_ENSURE_LTE(ticket_fields->session_secret.size, UINT8_MAX);
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(out, ticket_fields->session_secret.size));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_bytes(out, ticket_fields->session_secret.data, ticket_fields->session_secret.size));
+ RESULT_GUARD(s2n_tls13_serialize_keying_material_expiration(conn, current_time, out));
+
+ uint32_t server_max_early_data = 0;
+ RESULT_GUARD(s2n_early_data_get_server_max_size(conn, &server_max_early_data));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint32(out, server_max_early_data));
+ if (server_max_early_data > 0) {
+ uint8_t application_protocol_len = strlen(conn->application_protocol);
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(out, application_protocol_len));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_bytes(out, (uint8_t *) conn->application_protocol, application_protocol_len));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint16(out, conn->server_early_data_context.size));
+ RESULT_GUARD_POSIX(s2n_stuffer_write(out, &conn->server_early_data_context));
+ }
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_serialize_resumption_state(struct s2n_connection *conn, struct s2n_stuffer *out)
+{
+ if(conn->actual_protocol_version < S2N_TLS13) {
+ RESULT_GUARD_POSIX(s2n_tls12_serialize_resumption_state(conn, out));
+ } else {
+ RESULT_GUARD(s2n_tls13_serialize_resumption_state(conn, out));
+ }
+ return S2N_RESULT_OK;
+}
+
+static int s2n_tls12_deserialize_resumption_state(struct s2n_connection *conn, struct s2n_stuffer *from)
+{
+ uint8_t protocol_version = 0;
+ uint8_t cipher_suite[S2N_TLS_CIPHER_SUITE_LEN] = { 0 };
+
+ S2N_ERROR_IF(s2n_stuffer_data_available(from) < S2N_TLS12_STATE_SIZE_IN_BYTES - sizeof(uint8_t), S2N_ERR_STUFFER_OUT_OF_DATA);
+
+ POSIX_GUARD(s2n_stuffer_read_uint8(from, &protocol_version));
S2N_ERROR_IF(protocol_version != conn->actual_protocol_version, S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
- GUARD(s2n_stuffer_read_bytes(from, cipher_suite, S2N_TLS_CIPHER_SUITE_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(from, cipher_suite, S2N_TLS_CIPHER_SUITE_LEN));
S2N_ERROR_IF(memcmp(conn->secure.cipher_suite->iana_value, cipher_suite, S2N_TLS_CIPHER_SUITE_LEN), S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
uint64_t now;
- GUARD(conn->config->wall_clock(conn->config->sys_clock_ctx, &now));
+ POSIX_GUARD(conn->config->wall_clock(conn->config->sys_clock_ctx, &now));
uint64_t then;
- GUARD(s2n_stuffer_read_uint64(from, &then));
+ POSIX_GUARD(s2n_stuffer_read_uint64(from, &then));
S2N_ERROR_IF(then > now, S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
S2N_ERROR_IF(now - then > conn->config->session_state_lifetime_in_nanos, S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
- /* Last but not least, put the master secret in place */
- GUARD(s2n_stuffer_read_bytes(from, conn->secure.master_secret, S2N_TLS_SECRET_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(from, conn->secrets.tls12.master_secret, S2N_TLS_SECRET_LEN));
+
+ if (s2n_stuffer_data_available(from)) {
+ uint8_t ems_negotiated = 0;
+ POSIX_GUARD(s2n_stuffer_read_uint8(from, &ems_negotiated));
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc7627#section-5.3
+ *# o If the original session did not use the "extended_master_secret"
+ *# extension but the new ClientHello contains the extension, then the
+ *# server MUST NOT perform the abbreviated handshake. Instead, it
+ *# SHOULD continue with a full handshake (as described in
+ *# Section 5.2) to negotiate a new session.
+ *#
+ *# o If the original session used the "extended_master_secret"
+ *# extension but the new ClientHello does not contain it, the server
+ *# MUST abort the abbreviated handshake.
+ **/
+ if (conn->ems_negotiated != ems_negotiated) {
+ /* The session ticket needs to have the same EMS state as the current session. If it doesn't
+ * have the same state, the current session takes the state of the session ticket and errors.
+ * If the deserialization process errors, we will use this state in a few extra checks
+ * to determine if we can fallback to a full handshake.
+ */
+ conn->ems_negotiated = ems_negotiated;
+ POSIX_BAIL(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
+ }
+ }
- return 0;
+ return S2N_SUCCESS;
}
static int s2n_client_serialize_resumption_state(struct s2n_connection *conn, struct s2n_stuffer *to)
{
/* Serialize session ticket */
if (conn->config->use_tickets && conn->client_ticket.size > 0) {
- GUARD(s2n_stuffer_write_uint8(to, S2N_STATE_WITH_SESSION_TICKET));
- GUARD(s2n_stuffer_write_uint16(to, conn->client_ticket.size));
- GUARD(s2n_stuffer_write(to, &conn->client_ticket));
+ POSIX_GUARD(s2n_stuffer_write_uint8(to, S2N_STATE_WITH_SESSION_TICKET));
+ POSIX_GUARD(s2n_stuffer_write_uint16(to, conn->client_ticket.size));
+ POSIX_GUARD(s2n_stuffer_write(to, &conn->client_ticket));
} else {
/* Serialize session id */
- GUARD(s2n_stuffer_write_uint8(to, S2N_STATE_WITH_SESSION_ID));
- GUARD(s2n_stuffer_write_uint8(to, conn->session_id_len));
- GUARD(s2n_stuffer_write_bytes(to, conn->session_id, conn->session_id_len));
+ POSIX_ENSURE_LT(conn->actual_protocol_version, S2N_TLS13);
+ POSIX_GUARD(s2n_stuffer_write_uint8(to, S2N_STATE_WITH_SESSION_ID));
+ POSIX_GUARD(s2n_stuffer_write_uint8(to, conn->session_id_len));
+ POSIX_GUARD(s2n_stuffer_write_bytes(to, conn->session_id, conn->session_id_len));
}
/* Serialize session state */
- GUARD(s2n_serialize_resumption_state(conn, to));
+ POSIX_GUARD_RESULT(s2n_serialize_resumption_state(conn, to));
return 0;
}
-static int s2n_client_deserialize_session_state(struct s2n_connection *conn, struct s2n_stuffer *from)
-{
- if (s2n_stuffer_data_available(from) < S2N_STATE_SIZE_IN_BYTES) {
- S2N_ERROR(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
+static S2N_RESULT s2n_tls12_client_deserialize_session_state(struct s2n_connection *conn, struct s2n_stuffer *from)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(from);
+
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(from, &conn->actual_protocol_version));
+
+ uint8_t *cipher_suite_wire = s2n_stuffer_raw_read(from, S2N_TLS_CIPHER_SUITE_LEN);
+ RESULT_ENSURE_REF(cipher_suite_wire);
+ RESULT_GUARD_POSIX(s2n_set_cipher_as_client(conn, cipher_suite_wire));
+
+ uint64_t then = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint64(from, &then));
+
+ RESULT_GUARD_POSIX(s2n_stuffer_read_bytes(from, conn->secrets.tls12.master_secret, S2N_TLS_SECRET_LEN));
+
+ if (s2n_stuffer_data_available(from)) {
+ uint8_t ems_negotiated = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(from, &ems_negotiated));
+ conn->ems_negotiated = ems_negotiated;
}
+ return S2N_RESULT_OK;
+}
- uint8_t format;
- uint64_t then;
+static S2N_RESULT s2n_validate_ticket_age(uint64_t current_time, uint64_t ticket_issue_time)
+{
+ RESULT_ENSURE(current_time >= ticket_issue_time, S2N_ERR_INVALID_SESSION_TICKET);
+ uint64_t ticket_age_in_nanos = current_time - ticket_issue_time;
+ uint64_t ticket_age_in_sec = ticket_age_in_nanos / ONE_SEC_IN_NANOS;
+ RESULT_ENSURE(ticket_age_in_sec <= ONE_WEEK_IN_SEC, S2N_ERR_INVALID_SESSION_TICKET);
+ return S2N_RESULT_OK;
+}
- GUARD(s2n_stuffer_read_uint8(from, &format));
- if (format != S2N_SERIALIZED_FORMAT_VERSION) {
- S2N_ERROR(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
+static S2N_RESULT s2n_tls13_deserialize_session_state(struct s2n_connection *conn, struct s2n_blob *psk_identity, struct s2n_stuffer *from)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(psk_identity);
+ RESULT_ENSURE_REF(from);
+
+ DEFER_CLEANUP(struct s2n_psk psk = { 0 }, s2n_psk_wipe);
+ RESULT_GUARD(s2n_psk_init(&psk, S2N_PSK_TYPE_RESUMPTION));
+ RESULT_GUARD_POSIX(s2n_psk_set_identity(&psk, psk_identity->data, psk_identity->size));
+
+ uint8_t protocol_version = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(from, &protocol_version));
+ RESULT_ENSURE_GTE(protocol_version, S2N_TLS13);
+
+ uint8_t iana_id[S2N_TLS_CIPHER_SUITE_LEN] = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_read_bytes(from, iana_id, S2N_TLS_CIPHER_SUITE_LEN));
+ struct s2n_cipher_suite *cipher_suite = NULL;
+ RESULT_GUARD(s2n_cipher_suite_from_iana(iana_id, &cipher_suite));
+ RESULT_ENSURE_REF(cipher_suite);
+ psk.hmac_alg = cipher_suite->prf_alg;
+
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint64(from, &psk.ticket_issue_time));
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# Clients MUST NOT cache
+ *# tickets for longer than 7 days, regardless of the ticket_lifetime,
+ *# and MAY delete tickets earlier based on local policy.
+ */
+ uint64_t current_time = 0;
+ RESULT_GUARD_POSIX(conn->config->wall_clock(conn->config->sys_clock_ctx, &current_time));
+ RESULT_GUARD(s2n_validate_ticket_age(current_time, psk.ticket_issue_time));
+
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint32(from, &psk.ticket_age_add));
+
+ uint8_t secret_len = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(from, &secret_len));
+ RESULT_ENSURE_LTE(secret_len, S2N_TLS_SECRET_LEN);
+ uint8_t *secret_data = s2n_stuffer_raw_read(from, secret_len);
+ RESULT_ENSURE_REF(secret_data);
+ RESULT_GUARD_POSIX(s2n_psk_set_secret(&psk, secret_data, secret_len));
+
+ if (conn->mode == S2N_SERVER) {
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint64(from, &psk.keying_material_expiration));
+ RESULT_ENSURE(psk.keying_material_expiration > current_time, S2N_ERR_KEYING_MATERIAL_EXPIRED);
}
- GUARD(s2n_stuffer_read_uint8(from, &conn->actual_protocol_version));
+ uint32_t max_early_data_size = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint32(from, &max_early_data_size));
+ if (max_early_data_size > 0) {
+ RESULT_GUARD_POSIX(s2n_psk_configure_early_data(&psk, max_early_data_size,
+ iana_id[0], iana_id[1]));
+
+ uint8_t app_proto_size = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(from, &app_proto_size));
+ uint8_t *app_proto_data = s2n_stuffer_raw_read(from, app_proto_size);
+ RESULT_ENSURE_REF(app_proto_data);
+ RESULT_GUARD_POSIX(s2n_psk_set_application_protocol(&psk, app_proto_data, app_proto_size));
+
+ uint16_t early_data_context_size = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint16(from, &early_data_context_size));
+ uint8_t *early_data_context_data = s2n_stuffer_raw_read(from, early_data_context_size);
+ RESULT_ENSURE_REF(early_data_context_data);
+ RESULT_GUARD_POSIX(s2n_psk_set_early_data_context(&psk, early_data_context_data, early_data_context_size));
+ }
- uint8_t *cipher_suite_wire = s2n_stuffer_raw_read(from, S2N_TLS_CIPHER_SUITE_LEN);
- notnull_check(cipher_suite_wire);
- GUARD(s2n_set_cipher_as_client(conn, cipher_suite_wire));
+ /* Make sure that this connection is configured for resumption PSKs, not external PSKs */
+ RESULT_GUARD(s2n_connection_set_psk_type(conn, S2N_PSK_TYPE_RESUMPTION));
+ /* Remove all previously-set PSKs. To keep the session ticket API behavior consistent
+ * across protocol versions, we currently only support setting a single resumption PSK. */
+ RESULT_GUARD(s2n_psk_parameters_wipe(&conn->psk_params));
+ RESULT_GUARD_POSIX(s2n_connection_append_psk(conn, &psk));
- GUARD(s2n_stuffer_read_uint64(from, &then));
+ return S2N_RESULT_OK;
+}
- /* Last but not least, put the master secret in place */
- GUARD(s2n_stuffer_read_bytes(from, conn->secure.master_secret, S2N_TLS_SECRET_LEN));
+static S2N_RESULT s2n_deserialize_resumption_state(struct s2n_connection *conn, struct s2n_blob *psk_identity, struct s2n_stuffer *from)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(from);
- return 0;
+ uint8_t format = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(from, &format));
+
+ if (format == S2N_SERIALIZED_FORMAT_TLS12_V3) {
+ if (conn->mode == S2N_SERVER) {
+ RESULT_GUARD_POSIX(s2n_tls12_deserialize_resumption_state(conn, from));
+ } else {
+ RESULT_GUARD(s2n_tls12_client_deserialize_session_state(conn, from));
+ }
+ } else if (format == S2N_SERIALIZED_FORMAT_TLS13_V1) {
+ RESULT_GUARD(s2n_tls13_deserialize_session_state(conn, psk_identity, from));
+ if (conn->mode == S2N_CLIENT) {
+ /* Free the client_ticket after setting a psk on the connection.
+ * This prevents s2n_connection_get_session from returning a TLS1.3
+ * ticket before a ticket has been received from the server. */
+ RESULT_GUARD_POSIX(s2n_free(&conn->client_ticket));
+ }
+ } else {
+ RESULT_BAIL(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
+ }
+ conn->set_session = true;
+ return S2N_RESULT_OK;
}
static int s2n_client_deserialize_with_session_id(struct s2n_connection *conn, struct s2n_stuffer *from)
{
uint8_t session_id_len;
- GUARD(s2n_stuffer_read_uint8(from, &session_id_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(from, &session_id_len));
if (session_id_len == 0 || session_id_len > S2N_TLS_SESSION_ID_MAX_LEN
|| session_id_len > s2n_stuffer_data_available(from)) {
- S2N_ERROR(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
+ POSIX_BAIL(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
}
conn->session_id_len = session_id_len;
- GUARD(s2n_stuffer_read_bytes(from, conn->session_id, session_id_len));
+ POSIX_GUARD(s2n_stuffer_read_bytes(from, conn->session_id, session_id_len));
- GUARD(s2n_client_deserialize_session_state(conn, from));
+ POSIX_GUARD_RESULT(s2n_deserialize_resumption_state(conn, NULL, from));
return 0;
}
@@ -162,16 +366,16 @@ static int s2n_client_deserialize_with_session_id(struct s2n_connection *conn, s
static int s2n_client_deserialize_with_session_ticket(struct s2n_connection *conn, struct s2n_stuffer *from)
{
uint16_t session_ticket_len;
- GUARD(s2n_stuffer_read_uint16(from, &session_ticket_len));
+ POSIX_GUARD(s2n_stuffer_read_uint16(from, &session_ticket_len));
if (session_ticket_len == 0 || session_ticket_len > s2n_stuffer_data_available(from)) {
- S2N_ERROR(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
+ POSIX_BAIL(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
}
- GUARD(s2n_realloc(&conn->client_ticket, session_ticket_len));
- GUARD(s2n_stuffer_read(from, &conn->client_ticket));
+ POSIX_GUARD(s2n_realloc(&conn->client_ticket, session_ticket_len));
+ POSIX_GUARD(s2n_stuffer_read(from, &conn->client_ticket));
- GUARD(s2n_client_deserialize_session_state(conn, from));
+ POSIX_GUARD_RESULT(s2n_deserialize_resumption_state(conn, &conn->client_ticket, from));
return 0;
}
@@ -179,17 +383,17 @@ static int s2n_client_deserialize_with_session_ticket(struct s2n_connection *con
static int s2n_client_deserialize_resumption_state(struct s2n_connection *conn, struct s2n_stuffer *from)
{
uint8_t format;
- GUARD(s2n_stuffer_read_uint8(from, &format));
+ POSIX_GUARD(s2n_stuffer_read_uint8(from, &format));
switch (format) {
case S2N_STATE_WITH_SESSION_ID:
- GUARD(s2n_client_deserialize_with_session_id(conn, from));
+ POSIX_GUARD(s2n_client_deserialize_with_session_id(conn, from));
break;
case S2N_STATE_WITH_SESSION_TICKET:
- GUARD(s2n_client_deserialize_with_session_ticket(conn, from));
+ POSIX_GUARD(s2n_client_deserialize_with_session_ticket(conn, from));
break;
default:
- S2N_ERROR(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
+ POSIX_BAIL(S2N_ERR_INVALID_SERIALIZED_SESSION_STATE);
}
return 0;
@@ -200,67 +404,67 @@ int s2n_resume_from_cache(struct s2n_connection *conn)
S2N_ERROR_IF(conn->session_id_len == 0, S2N_ERR_SESSION_ID_TOO_SHORT);
S2N_ERROR_IF(conn->session_id_len > S2N_TLS_SESSION_ID_MAX_LEN, S2N_ERR_SESSION_ID_TOO_LONG);
- uint8_t data[S2N_TICKET_SIZE_IN_BYTES] = { 0 };
+ uint8_t data[S2N_TLS12_TICKET_SIZE_IN_BYTES] = { 0 };
struct s2n_blob entry = {0};
- GUARD(s2n_blob_init(&entry, data, S2N_TICKET_SIZE_IN_BYTES));
+ POSIX_GUARD(s2n_blob_init(&entry, data, S2N_TLS12_TICKET_SIZE_IN_BYTES));
uint64_t size = entry.size;
int result = conn->config->cache_retrieve(conn, conn->config->cache_retrieve_data, conn->session_id, conn->session_id_len, entry.data, &size);
if (result == S2N_CALLBACK_BLOCKED) {
- S2N_ERROR(S2N_ERR_ASYNC_BLOCKED);
+ POSIX_BAIL(S2N_ERR_ASYNC_BLOCKED);
}
- GUARD(result);
+ POSIX_GUARD(result);
S2N_ERROR_IF(size != entry.size, S2N_ERR_SIZE_MISMATCH);
struct s2n_stuffer from = {0};
- GUARD(s2n_stuffer_init(&from, &entry));
- GUARD(s2n_stuffer_write(&from, &entry));
- GUARD(s2n_decrypt_session_cache(conn, &from));
+ POSIX_GUARD(s2n_stuffer_init(&from, &entry));
+ POSIX_GUARD(s2n_stuffer_write(&from, &entry));
+ POSIX_GUARD(s2n_decrypt_session_cache(conn, &from));
return 0;
}
-int s2n_store_to_cache(struct s2n_connection *conn)
+S2N_RESULT s2n_store_to_cache(struct s2n_connection *conn)
{
- uint8_t data[S2N_TICKET_SIZE_IN_BYTES] = { 0 };
+ uint8_t data[S2N_TLS12_TICKET_SIZE_IN_BYTES] = { 0 };
struct s2n_blob entry = {0};
- GUARD(s2n_blob_init(&entry, data, S2N_TICKET_SIZE_IN_BYTES));
+ RESULT_GUARD_POSIX(s2n_blob_init(&entry, data, S2N_TLS12_TICKET_SIZE_IN_BYTES));
struct s2n_stuffer to = {0};
/* session_id_len should always be >0 since either the Client provided a SessionId or the Server generated a new
* one for the Client */
- S2N_ERROR_IF(conn->session_id_len == 0, S2N_ERR_SESSION_ID_TOO_SHORT);
- S2N_ERROR_IF(conn->session_id_len > S2N_TLS_SESSION_ID_MAX_LEN, S2N_ERR_SESSION_ID_TOO_LONG);
+ RESULT_ENSURE(conn->session_id_len > 0, S2N_ERR_SESSION_ID_TOO_SHORT);
+ RESULT_ENSURE(conn->session_id_len <= S2N_TLS_SESSION_ID_MAX_LEN, S2N_ERR_SESSION_ID_TOO_LONG);
- GUARD(s2n_stuffer_init(&to, &entry));
- GUARD(s2n_encrypt_session_cache(conn, &to));
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&to, &entry));
+ RESULT_GUARD_POSIX(s2n_encrypt_session_cache(conn, &to));
/* Store to the cache */
conn->config->cache_store(conn, conn->config->cache_store_data, S2N_TLS_SESSION_CACHE_TTL, conn->session_id, conn->session_id_len, entry.data, entry.size);
- return 0;
+ return S2N_RESULT_OK;
}
int s2n_connection_set_session(struct s2n_connection *conn, const uint8_t *session, size_t length)
{
- notnull_check(conn);
- notnull_check(session);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(session);
DEFER_CLEANUP(struct s2n_blob session_data = {0}, s2n_free);
- GUARD(s2n_alloc(&session_data, length));
- memcpy(session_data.data, session, length);
+ POSIX_GUARD(s2n_alloc(&session_data, length));
+ POSIX_CHECKED_MEMCPY(session_data.data, session, length);
struct s2n_stuffer from = {0};
- GUARD(s2n_stuffer_init(&from, &session_data));
- GUARD(s2n_stuffer_write(&from, &session_data));
- GUARD(s2n_client_deserialize_resumption_state(conn, &from));
+ POSIX_GUARD(s2n_stuffer_init(&from, &session_data));
+ POSIX_GUARD(s2n_stuffer_write(&from, &session_data));
+ POSIX_GUARD(s2n_client_deserialize_resumption_state(conn, &from));
return 0;
}
int s2n_connection_get_session(struct s2n_connection *conn, uint8_t *session, size_t max_length)
{
- notnull_check(conn);
- notnull_check(session);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(session);
int len = s2n_connection_get_session_length(conn);
@@ -271,52 +475,93 @@ int s2n_connection_get_session(struct s2n_connection *conn, uint8_t *session, si
S2N_ERROR_IF(len > max_length, S2N_ERR_SERIALIZED_SESSION_STATE_TOO_LONG);
struct s2n_blob serialized_data = {0};
- GUARD(s2n_blob_init(&serialized_data, session, len));
- GUARD(s2n_blob_zero(&serialized_data));
+ POSIX_GUARD(s2n_blob_init(&serialized_data, session, len));
+ POSIX_GUARD(s2n_blob_zero(&serialized_data));
struct s2n_stuffer to = {0};
- GUARD(s2n_stuffer_init(&to, &serialized_data));
- GUARD(s2n_client_serialize_resumption_state(conn, &to));
+ POSIX_GUARD(s2n_stuffer_init(&to, &serialized_data));
+ POSIX_GUARD(s2n_client_serialize_resumption_state(conn, &to));
return len;
}
int s2n_connection_get_session_ticket_lifetime_hint(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
S2N_ERROR_IF(!(conn->config->use_tickets && conn->client_ticket.size > 0), S2N_ERR_SESSION_TICKET_NOT_SUPPORTED);
/* Session resumption using session ticket */
return conn->ticket_lifetime_hint;
}
-int s2n_connection_get_session_length(struct s2n_connection *conn)
+S2N_RESULT s2n_connection_get_session_state_size(struct s2n_connection *conn, size_t *state_size)
{
- /* Session resumption using session ticket "format (1) + session_ticket_len + session_ticket + session state" */
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(state_size);
+
+ if (conn->actual_protocol_version < S2N_TLS13) {
+ *state_size = S2N_TLS12_STATE_SIZE_IN_BYTES;
+ return S2N_RESULT_OK;
+ }
+
+ *state_size = S2N_TLS13_FIXED_STATE_SIZE;
+
+ uint8_t secret_size = 0;
+ RESULT_ENSURE_REF(conn->secure.cipher_suite);
+ RESULT_GUARD_POSIX(s2n_hmac_digest_size(conn->secure.cipher_suite->prf_alg, &secret_size));
+ *state_size += secret_size;
+
+ uint32_t server_max_early_data = 0;
+ RESULT_GUARD(s2n_early_data_get_server_max_size(conn, &server_max_early_data));
+ if (server_max_early_data > 0) {
+ *state_size += S2N_TLS13_FIXED_EARLY_DATA_STATE_SIZE
+ + strlen(conn->application_protocol)
+ + conn->server_early_data_context.size;
+ }
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_connection_get_session_length_impl(struct s2n_connection *conn, size_t *length)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->config);
+ RESULT_ENSURE_REF(length);
+ *length = 0;
+
if (conn->config->use_tickets && conn->client_ticket.size > 0) {
- return S2N_STATE_FORMAT_LEN + S2N_SESSION_TICKET_SIZE_LEN + conn->client_ticket.size + S2N_STATE_SIZE_IN_BYTES;
- } else if (conn->session_id_len > 0) {
- /* Session resumption using session id: "format (0) + session_id_len + session_id + session state" */
- return S2N_STATE_FORMAT_LEN + 1 + conn->session_id_len + S2N_STATE_SIZE_IN_BYTES;
- } else {
- return 0;
+ size_t session_state_size = 0;
+ RESULT_GUARD(s2n_connection_get_session_state_size(conn, &session_state_size));
+ *length = S2N_STATE_FORMAT_LEN + S2N_SESSION_TICKET_SIZE_LEN + conn->client_ticket.size + session_state_size;
+ } else if (conn->session_id_len > 0 && conn->actual_protocol_version < S2N_TLS13) {
+ *length = S2N_STATE_FORMAT_LEN + sizeof(conn->session_id_len) + conn->session_id_len + S2N_TLS12_STATE_SIZE_IN_BYTES;
}
+ return S2N_RESULT_OK;
+}
+
+int s2n_connection_get_session_length(struct s2n_connection *conn)
+{
+ size_t length = 0;
+ if (s2n_result_is_ok(s2n_connection_get_session_length_impl(conn, &length))) {
+ return length;
+ }
+ return 0;
}
int s2n_connection_is_session_resumed(struct s2n_connection *conn)
{
- notnull_check(conn);
- return IS_RESUMPTION_HANDSHAKE(conn->handshake.handshake_type) ? 1 : 0;
+ return conn && IS_RESUMPTION_HANDSHAKE(conn)
+ && (conn->actual_protocol_version < S2N_TLS13 || conn->psk_params.type == S2N_PSK_TYPE_RESUMPTION);
}
int s2n_connection_is_ocsp_stapled(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
if (conn->actual_protocol_version >= S2N_TLS13) {
return (s2n_server_can_send_ocsp(conn) || s2n_server_sent_ocsp(conn));
} else {
- return IS_OCSP_STAPLED(conn->handshake.handshake_type);
+ return IS_OCSP_STAPLED(conn);
}
}
@@ -324,15 +569,15 @@ int s2n_config_is_encrypt_decrypt_key_available(struct s2n_config *config)
{
uint64_t now;
struct s2n_ticket_key *ticket_key = NULL;
- GUARD(config->wall_clock(config->sys_clock_ctx, &now));
- notnull_check(config->ticket_keys);
+ POSIX_GUARD(config->wall_clock(config->sys_clock_ctx, &now));
+ POSIX_ENSURE_REF(config->ticket_keys);
uint32_t ticket_keys_len = 0;
- GUARD_AS_POSIX(s2n_set_len(config->ticket_keys, &ticket_keys_len));
+ POSIX_GUARD_RESULT(s2n_set_len(config->ticket_keys, &ticket_keys_len));
for (uint32_t i = ticket_keys_len; i > 0; i--) {
uint32_t idx = i - 1;
- GUARD_AS_POSIX(s2n_set_get(config->ticket_keys, idx, (void **)&ticket_key));
+ POSIX_GUARD_RESULT(s2n_set_get(config->ticket_keys, idx, (void **)&ticket_key));
uint64_t key_intro_time = ticket_key->intro_timestamp;
if (key_intro_time < now
@@ -359,7 +604,7 @@ int s2n_compute_weight_of_encrypt_decrypt_keys(struct s2n_config *config,
/* Compute weight of encrypt-decrypt keys */
for (int i = 0; i < num_encrypt_decrypt_keys; i++) {
- GUARD_AS_POSIX(s2n_set_get(config->ticket_keys, encrypt_decrypt_keys_index[i], (void **)&ticket_key));
+ POSIX_GUARD_RESULT(s2n_set_get(config->ticket_keys, encrypt_decrypt_keys_index[i], (void **)&ticket_key));
uint64_t key_intro_time = ticket_key->intro_timestamp;
uint64_t key_encryption_peak_time = key_intro_time + (config->encrypt_decrypt_key_lifetime_in_nanos / 2);
@@ -378,7 +623,7 @@ int s2n_compute_weight_of_encrypt_decrypt_keys(struct s2n_config *config,
/* Pick a random number in [0, 1). Using 53 bits (IEEE 754 double-precision floats). */
uint64_t random_int = 0;
- GUARD_AS_POSIX(s2n_public_random(pow(2, 53), &random_int));
+ POSIX_GUARD_RESULT(s2n_public_random(pow(2, 53), &random_int));
double random = (double)random_int / (double)pow(2, 53);
/* Compute cumulative weight of encrypt-decrypt keys */
@@ -394,7 +639,7 @@ int s2n_compute_weight_of_encrypt_decrypt_keys(struct s2n_config *config,
}
}
- S2N_ERROR(S2N_ERR_ENCRYPT_DECRYPT_KEY_SELECTION_FAILED);
+ POSIX_BAIL(S2N_ERR_ENCRYPT_DECRYPT_KEY_SELECTION_FAILED);
}
/* This function is used in s2n_encrypt_session_ticket in order for s2n to
@@ -407,15 +652,15 @@ struct s2n_ticket_key *s2n_get_ticket_encrypt_decrypt_key(struct s2n_config *con
struct s2n_ticket_key *ticket_key = NULL;
uint64_t now;
- GUARD_PTR(config->wall_clock(config->sys_clock_ctx, &now));
- notnull_check_ptr(config->ticket_keys);
+ PTR_GUARD_POSIX(config->wall_clock(config->sys_clock_ctx, &now));
+ PTR_ENSURE_REF(config->ticket_keys);
uint32_t ticket_keys_len = 0;
- GUARD_RESULT_PTR(s2n_set_len(config->ticket_keys, &ticket_keys_len));
+ PTR_GUARD_RESULT(s2n_set_len(config->ticket_keys, &ticket_keys_len));
for (uint32_t i = ticket_keys_len; i > 0; i--) {
uint32_t idx = i - 1;
- GUARD_RESULT_PTR(s2n_set_get(config->ticket_keys, idx, (void **)&ticket_key));
+ PTR_GUARD_RESULT(s2n_set_get(config->ticket_keys, idx, (void **)&ticket_key));
uint64_t key_intro_time = ticket_key->intro_timestamp;
if (key_intro_time < now
@@ -426,18 +671,18 @@ struct s2n_ticket_key *s2n_get_ticket_encrypt_decrypt_key(struct s2n_config *con
}
if (num_encrypt_decrypt_keys == 0) {
- S2N_ERROR_PTR(S2N_ERR_NO_TICKET_ENCRYPT_DECRYPT_KEY);
+ PTR_BAIL(S2N_ERR_NO_TICKET_ENCRYPT_DECRYPT_KEY);
}
if (num_encrypt_decrypt_keys == 1) {
- GUARD_RESULT_PTR(s2n_set_get(config->ticket_keys, encrypt_decrypt_keys_index[0], (void **)&ticket_key));
+ PTR_GUARD_RESULT(s2n_set_get(config->ticket_keys, encrypt_decrypt_keys_index[0], (void **)&ticket_key));
return ticket_key;
}
int8_t idx;
- GUARD_PTR(idx = s2n_compute_weight_of_encrypt_decrypt_keys(config, encrypt_decrypt_keys_index, num_encrypt_decrypt_keys, now));
+ PTR_GUARD_POSIX(idx = s2n_compute_weight_of_encrypt_decrypt_keys(config, encrypt_decrypt_keys_index, num_encrypt_decrypt_keys, now));
- GUARD_RESULT_PTR(s2n_set_get(config->ticket_keys, idx, (void **)&ticket_key));
+ PTR_GUARD_RESULT(s2n_set_get(config->ticket_keys, idx, (void **)&ticket_key));
return ticket_key;
}
@@ -448,14 +693,14 @@ struct s2n_ticket_key *s2n_find_ticket_key(struct s2n_config *config, const uint
{
uint64_t now;
struct s2n_ticket_key *ticket_key = NULL;
- GUARD_PTR(config->wall_clock(config->sys_clock_ctx, &now));
- notnull_check_ptr(config->ticket_keys);
+ PTR_GUARD_POSIX(config->wall_clock(config->sys_clock_ctx, &now));
+ PTR_ENSURE_REF(config->ticket_keys);
uint32_t ticket_keys_len = 0;
- GUARD_RESULT_PTR(s2n_set_len(config->ticket_keys, &ticket_keys_len));
+ PTR_GUARD_RESULT(s2n_set_len(config->ticket_keys, &ticket_keys_len));
for (uint32_t i = 0; i < ticket_keys_len; i++) {
- GUARD_RESULT_PTR(s2n_set_get(config->ticket_keys, i, (void **)&ticket_key));
+ PTR_GUARD_RESULT(s2n_set_get(config->ticket_keys, i, (void **)&ticket_key));
if (memcmp(ticket_key->key_name, name, S2N_TICKET_KEY_NAME_LEN) == 0) {
@@ -482,107 +727,104 @@ int s2n_encrypt_session_ticket(struct s2n_connection *conn, struct s2n_stuffer *
uint8_t iv_data[S2N_TLS_GCM_IV_LEN] = { 0 };
struct s2n_blob iv = {0};
- GUARD(s2n_blob_init(&iv, iv_data, sizeof(iv_data)));
+ POSIX_GUARD(s2n_blob_init(&iv, iv_data, sizeof(iv_data)));
uint8_t aad_data[S2N_TICKET_AAD_LEN] = { 0 };
struct s2n_blob aad_blob = {0};
- GUARD(s2n_blob_init(&aad_blob, aad_data, sizeof(aad_data)));
+ POSIX_GUARD(s2n_blob_init(&aad_blob, aad_data, sizeof(aad_data)));
struct s2n_stuffer aad = {0};
- uint8_t s_data[S2N_STATE_SIZE_IN_BYTES + S2N_TLS_GCM_TAG_LEN] = { 0 };
- struct s2n_blob state_blob = {0};
- GUARD(s2n_blob_init(&state_blob, s_data, sizeof(s_data)));
- struct s2n_stuffer state = {0};
-
key = s2n_get_ticket_encrypt_decrypt_key(conn->config);
/* No keys loaded by the user or the keys are either in decrypt-only or expired state */
S2N_ERROR_IF(!key, S2N_ERR_NO_TICKET_ENCRYPT_DECRYPT_KEY);
- GUARD(s2n_stuffer_write_bytes(to, key->key_name, S2N_TICKET_KEY_NAME_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(to, key->key_name, S2N_TICKET_KEY_NAME_LEN));
- GUARD_AS_POSIX(s2n_get_public_random_data(&iv));
- GUARD(s2n_stuffer_write(to, &iv));
+ POSIX_GUARD_RESULT(s2n_get_public_random_data(&iv));
+ POSIX_GUARD(s2n_stuffer_write(to, &iv));
- GUARD(s2n_blob_init(&aes_key_blob, key->aes_key, S2N_AES256_KEY_LEN));
- GUARD(s2n_session_key_alloc(&aes_ticket_key));
- GUARD(s2n_aes256_gcm.init(&aes_ticket_key));
- GUARD(s2n_aes256_gcm.set_encryption_key(&aes_ticket_key, &aes_key_blob));
+ POSIX_GUARD(s2n_blob_init(&aes_key_blob, key->aes_key, S2N_AES256_KEY_LEN));
+ POSIX_GUARD(s2n_session_key_alloc(&aes_ticket_key));
+ POSIX_GUARD(s2n_aes256_gcm.init(&aes_ticket_key));
+ POSIX_GUARD(s2n_aes256_gcm.set_encryption_key(&aes_ticket_key, &aes_key_blob));
- GUARD(s2n_stuffer_init(&aad, &aad_blob));
- GUARD(s2n_stuffer_write_bytes(&aad, key->implicit_aad, S2N_TICKET_AAD_IMPLICIT_LEN));
- GUARD(s2n_stuffer_write_bytes(&aad, key->key_name, S2N_TICKET_KEY_NAME_LEN));
+ POSIX_GUARD(s2n_stuffer_init(&aad, &aad_blob));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&aad, key->implicit_aad, S2N_TICKET_AAD_IMPLICIT_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&aad, key->key_name, S2N_TICKET_KEY_NAME_LEN));
- GUARD(s2n_stuffer_init(&state, &state_blob));
- GUARD(s2n_serialize_resumption_state(conn, &state));
+ uint32_t plaintext_header_size = s2n_stuffer_data_available(to);
+ POSIX_GUARD_RESULT(s2n_serialize_resumption_state(conn, to));
+ POSIX_GUARD(s2n_stuffer_skip_write(to, S2N_TLS_GCM_TAG_LEN));
- GUARD(s2n_aes256_gcm.io.aead.encrypt(&aes_ticket_key, &iv, &aad_blob, &state_blob, &state_blob));
+ struct s2n_blob state_blob = { 0 };
+ struct s2n_stuffer copy_for_encryption = *to;
+ POSIX_GUARD(s2n_stuffer_skip_read(&copy_for_encryption, plaintext_header_size));
+ uint32_t state_blob_size = s2n_stuffer_data_available(&copy_for_encryption);
+ uint8_t *state_blob_data = s2n_stuffer_raw_read(&copy_for_encryption, state_blob_size);
+ POSIX_ENSURE_REF(state_blob_data);
+ POSIX_GUARD(s2n_blob_init(&state_blob, state_blob_data, state_blob_size));
- GUARD(s2n_stuffer_write(to, &state_blob));
+ POSIX_GUARD(s2n_aes256_gcm.io.aead.encrypt(&aes_ticket_key, &iv, &aad_blob, &state_blob, &state_blob));
- GUARD(s2n_aes256_gcm.destroy_key(&aes_ticket_key));
- GUARD(s2n_session_key_free(&aes_ticket_key));
+ POSIX_GUARD(s2n_aes256_gcm.destroy_key(&aes_ticket_key));
+ POSIX_GUARD(s2n_session_key_free(&aes_ticket_key));
return 0;
}
-int s2n_decrypt_session_ticket(struct s2n_connection *conn)
+int s2n_decrypt_session_ticket(struct s2n_connection *conn, struct s2n_stuffer *from)
{
struct s2n_ticket_key *key;
DEFER_CLEANUP(struct s2n_session_key aes_ticket_key = {0}, s2n_session_key_free);
struct s2n_blob aes_key_blob = {0};
- struct s2n_stuffer *from;
uint8_t key_name[S2N_TICKET_KEY_NAME_LEN];
uint8_t iv_data[S2N_TLS_GCM_IV_LEN] = { 0 };
struct s2n_blob iv = { 0 };
- GUARD(s2n_blob_init(&iv, iv_data, sizeof(iv_data)));
+ POSIX_GUARD(s2n_blob_init(&iv, iv_data, sizeof(iv_data)));
uint8_t aad_data[S2N_TICKET_AAD_LEN] = { 0 };
struct s2n_blob aad_blob = {0};
- GUARD(s2n_blob_init(&aad_blob, aad_data, sizeof(aad_data)));
+ POSIX_GUARD(s2n_blob_init(&aad_blob, aad_data, sizeof(aad_data)));
struct s2n_stuffer aad = {0};
- uint8_t s_data[S2N_STATE_SIZE_IN_BYTES] = { 0 };
- struct s2n_blob state_blob = {0};
- GUARD(s2n_blob_init(&state_blob, s_data, sizeof(s_data)));
- struct s2n_stuffer state = {0};
-
- uint8_t en_data[S2N_STATE_SIZE_IN_BYTES + S2N_TLS_GCM_TAG_LEN] = {0};
- struct s2n_blob en_blob = {0};
- GUARD(s2n_blob_init(&en_blob, en_data, sizeof(en_data)));
-
- from = &conn->client_ticket_to_decrypt;
- GUARD(s2n_stuffer_read_bytes(from, key_name, S2N_TICKET_KEY_NAME_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(from, key_name, S2N_TICKET_KEY_NAME_LEN));
key = s2n_find_ticket_key(conn->config, key_name);
/* Key has expired; do full handshake with New Session Ticket (NST) */
S2N_ERROR_IF(!key, S2N_ERR_KEY_USED_IN_SESSION_TICKET_NOT_FOUND);
- GUARD(s2n_stuffer_read(from, &iv));
+ POSIX_GUARD(s2n_stuffer_read(from, &iv));
s2n_blob_init(&aes_key_blob, key->aes_key, S2N_AES256_KEY_LEN);
- GUARD(s2n_session_key_alloc(&aes_ticket_key));
- GUARD(s2n_aes256_gcm.init(&aes_ticket_key));
- GUARD(s2n_aes256_gcm.set_decryption_key(&aes_ticket_key, &aes_key_blob));
-
- GUARD(s2n_stuffer_init(&aad, &aad_blob));
- GUARD(s2n_stuffer_write_bytes(&aad, key->implicit_aad, S2N_TICKET_AAD_IMPLICIT_LEN));
- GUARD(s2n_stuffer_write_bytes(&aad, key->key_name, S2N_TICKET_KEY_NAME_LEN));
-
- GUARD(s2n_stuffer_read(from, &en_blob));
-
- GUARD(s2n_aes256_gcm.io.aead.decrypt(&aes_ticket_key, &iv, &aad_blob, &en_blob, &en_blob));
-
- GUARD(s2n_stuffer_init(&state, &state_blob));
- GUARD(s2n_stuffer_write_bytes(&state, en_data, S2N_STATE_SIZE_IN_BYTES));
-
- GUARD(s2n_deserialize_resumption_state(conn, &state));
+ POSIX_GUARD(s2n_session_key_alloc(&aes_ticket_key));
+ POSIX_GUARD(s2n_aes256_gcm.init(&aes_ticket_key));
+ POSIX_GUARD(s2n_aes256_gcm.set_decryption_key(&aes_ticket_key, &aes_key_blob));
+
+ POSIX_GUARD(s2n_stuffer_init(&aad, &aad_blob));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&aad, key->implicit_aad, S2N_TICKET_AAD_IMPLICIT_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&aad, key->key_name, S2N_TICKET_KEY_NAME_LEN));
+
+ struct s2n_blob en_blob = { 0 };
+ uint32_t en_blob_size = s2n_stuffer_data_available(from);
+ uint8_t *en_blob_data = s2n_stuffer_raw_read(from, en_blob_size);
+ POSIX_ENSURE_REF(en_blob_data);
+ POSIX_GUARD(s2n_blob_init(&en_blob, en_blob_data, en_blob_size));
+ POSIX_GUARD(s2n_aes256_gcm.io.aead.decrypt(&aes_ticket_key, &iv, &aad_blob, &en_blob, &en_blob));
+
+ struct s2n_blob state_blob = { 0 };
+ uint32_t state_blob_size = en_blob_size - S2N_TLS_GCM_TAG_LEN;
+ POSIX_GUARD(s2n_blob_init(&state_blob, en_blob.data, state_blob_size));
+ struct s2n_stuffer state_stuffer = { 0 };
+ POSIX_GUARD(s2n_stuffer_init(&state_stuffer, &state_blob));
+ POSIX_GUARD(s2n_stuffer_skip_write(&state_stuffer, state_blob_size));
+ POSIX_GUARD_RESULT(s2n_deserialize_resumption_state(conn, &from->blob, &state_stuffer));
uint64_t now;
- GUARD(conn->config->wall_clock(conn->config->sys_clock_ctx, &now));
+ POSIX_GUARD(conn->config->wall_clock(conn->config->sys_clock_ctx, &now));
/* If the key is in decrypt-only state, then a new key is assigned
* for the ticket.
@@ -591,13 +833,11 @@ int s2n_decrypt_session_ticket(struct s2n_connection *conn)
/* Check if a key in encrypt-decrypt state is available */
if (s2n_config_is_encrypt_decrypt_key_available(conn->config) == 1) {
conn->session_ticket_status = S2N_NEW_TICKET;
- conn->handshake.handshake_type |= WITH_SESSION_TICKET;
-
- return 0;
+ POSIX_GUARD_RESULT(s2n_handshake_type_set_tls12_flag(conn, WITH_SESSION_TICKET));
+ return S2N_SUCCESS;
}
}
-
- return 0;
+ return S2N_SUCCESS;
}
int s2n_encrypt_session_cache(struct s2n_connection *conn, struct s2n_stuffer *to)
@@ -605,7 +845,6 @@ int s2n_encrypt_session_cache(struct s2n_connection *conn, struct s2n_stuffer *t
return s2n_encrypt_session_ticket(conn, to);
}
-
int s2n_decrypt_session_cache(struct s2n_connection *conn, struct s2n_stuffer *from)
{
struct s2n_ticket_key *key;
@@ -616,51 +855,50 @@ int s2n_decrypt_session_cache(struct s2n_connection *conn, struct s2n_stuffer *f
uint8_t iv_data[S2N_TLS_GCM_IV_LEN] = { 0 };
struct s2n_blob iv = {0};
- GUARD(s2n_blob_init(&iv, iv_data, sizeof(iv_data)));
+ POSIX_GUARD(s2n_blob_init(&iv, iv_data, sizeof(iv_data)));
uint8_t aad_data[S2N_TICKET_AAD_LEN] = { 0 };
struct s2n_blob aad_blob = {0};
- GUARD(s2n_blob_init(&aad_blob, aad_data, sizeof(aad_data)));
+ POSIX_GUARD(s2n_blob_init(&aad_blob, aad_data, sizeof(aad_data)));
struct s2n_stuffer aad = {0};
- uint8_t s_data[S2N_STATE_SIZE_IN_BYTES] = { 0 };
+ uint8_t s_data[S2N_TLS12_STATE_SIZE_IN_BYTES] = { 0 };
struct s2n_blob state_blob = {0};
- GUARD(s2n_blob_init(&state_blob, s_data, sizeof(s_data)));
+ POSIX_GUARD(s2n_blob_init(&state_blob, s_data, sizeof(s_data)));
struct s2n_stuffer state = {0};
- uint8_t en_data[S2N_STATE_SIZE_IN_BYTES + S2N_TLS_GCM_TAG_LEN] = {0};
+ uint8_t en_data[S2N_TLS12_STATE_SIZE_IN_BYTES + S2N_TLS_GCM_TAG_LEN] = {0};
struct s2n_blob en_blob = {0};
- GUARD(s2n_blob_init(&en_blob, en_data, sizeof(en_data)));
+ POSIX_GUARD(s2n_blob_init(&en_blob, en_data, sizeof(en_data)));
- GUARD(s2n_stuffer_read_bytes(from, key_name, S2N_TICKET_KEY_NAME_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(from, key_name, S2N_TICKET_KEY_NAME_LEN));
key = s2n_find_ticket_key(conn->config, key_name);
/* Key has expired; do full handshake with New Session Ticket (NST) */
S2N_ERROR_IF(!key, S2N_ERR_KEY_USED_IN_SESSION_TICKET_NOT_FOUND);
- GUARD(s2n_stuffer_read(from, &iv));
+ POSIX_GUARD(s2n_stuffer_read(from, &iv));
s2n_blob_init(&aes_key_blob, key->aes_key, S2N_AES256_KEY_LEN);
- GUARD(s2n_session_key_alloc(&aes_ticket_key));
- GUARD(s2n_aes256_gcm.init(&aes_ticket_key));
- GUARD(s2n_aes256_gcm.set_decryption_key(&aes_ticket_key, &aes_key_blob));
-
- GUARD(s2n_stuffer_init(&aad, &aad_blob));
- GUARD(s2n_stuffer_write_bytes(&aad, key->implicit_aad, S2N_TICKET_AAD_IMPLICIT_LEN));
- GUARD(s2n_stuffer_write_bytes(&aad, key->key_name, S2N_TICKET_KEY_NAME_LEN));
+ POSIX_GUARD(s2n_session_key_alloc(&aes_ticket_key));
+ POSIX_GUARD(s2n_aes256_gcm.init(&aes_ticket_key));
+ POSIX_GUARD(s2n_aes256_gcm.set_decryption_key(&aes_ticket_key, &aes_key_blob));
- GUARD(s2n_stuffer_read(from, &en_blob));
+ POSIX_GUARD(s2n_stuffer_init(&aad, &aad_blob));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&aad, key->implicit_aad, S2N_TICKET_AAD_IMPLICIT_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&aad, key->key_name, S2N_TICKET_KEY_NAME_LEN));
- GUARD(s2n_aes256_gcm.io.aead.decrypt(&aes_ticket_key, &iv, &aad_blob, &en_blob, &en_blob));
+ POSIX_GUARD(s2n_stuffer_read(from, &en_blob));
- GUARD(s2n_stuffer_init(&state, &state_blob));
- GUARD(s2n_stuffer_write_bytes(&state, en_data, S2N_STATE_SIZE_IN_BYTES));
+ POSIX_GUARD(s2n_aes256_gcm.io.aead.decrypt(&aes_ticket_key, &iv, &aad_blob, &en_blob, &en_blob));
+ POSIX_GUARD(s2n_aes256_gcm.destroy_key(&aes_ticket_key));
+ POSIX_GUARD(s2n_session_key_free(&aes_ticket_key));
- GUARD(s2n_deserialize_resumption_state(conn, &state));
+ POSIX_GUARD(s2n_stuffer_init(&state, &state_blob));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&state, en_data, S2N_TLS12_STATE_SIZE_IN_BYTES));
- GUARD(s2n_aes256_gcm.destroy_key(&aes_ticket_key));
- GUARD(s2n_session_key_free(&aes_ticket_key));
+ POSIX_GUARD_RESULT(s2n_deserialize_resumption_state(conn, NULL, &state));
return 0;
}
@@ -680,14 +918,14 @@ int s2n_config_wipe_expired_ticket_crypto_keys(struct s2n_config *config, int8_t
}
uint64_t now;
- GUARD(config->wall_clock(config->sys_clock_ctx, &now));
- notnull_check(config->ticket_keys);
+ POSIX_GUARD(config->wall_clock(config->sys_clock_ctx, &now));
+ POSIX_ENSURE_REF(config->ticket_keys);
uint32_t ticket_keys_len = 0;
- GUARD_AS_POSIX(s2n_set_len(config->ticket_keys, &ticket_keys_len));
+ POSIX_GUARD_RESULT(s2n_set_len(config->ticket_keys, &ticket_keys_len));
for (uint32_t i = 0; i < ticket_keys_len; i++) {
- GUARD_AS_POSIX(s2n_set_get(config->ticket_keys, i, (void **)&ticket_key));
+ POSIX_GUARD_RESULT(s2n_set_get(config->ticket_keys, i, (void **)&ticket_key));
if (now >= ticket_key->intro_timestamp +
config->encrypt_decrypt_key_lifetime_in_nanos + config->decrypt_key_lifetime_in_nanos) {
expired_keys_index[num_of_expired_keys] = i;
@@ -697,7 +935,7 @@ int s2n_config_wipe_expired_ticket_crypto_keys(struct s2n_config *config, int8_t
end:
for (int j = 0; j < num_of_expired_keys; j++) {
- GUARD_AS_POSIX(s2n_set_remove(config->ticket_keys, expired_keys_index[j] - j));
+ POSIX_GUARD_RESULT(s2n_set_remove(config->ticket_keys, expired_keys_index[j] - j));
}
return 0;
@@ -707,6 +945,83 @@ end:
int s2n_config_store_ticket_key(struct s2n_config *config, struct s2n_ticket_key *key)
{
/* Keys are stored from oldest to newest */
- GUARD_AS_POSIX(s2n_set_add(config->ticket_keys, key));
+ POSIX_GUARD_RESULT(s2n_set_add(config->ticket_keys, key));
+ return S2N_SUCCESS;
+}
+
+int s2n_config_set_initial_ticket_count(struct s2n_config *config, uint8_t num)
+{
+ POSIX_ENSURE_REF(config);
+
+ config->initial_tickets_to_send = num;
+ POSIX_GUARD(s2n_config_set_session_tickets_onoff(config, true));
+
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_add_new_tickets_to_send(struct s2n_connection *conn, uint8_t num)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_GUARD_RESULT(s2n_psk_validate_keying_material(conn));
+
+ uint32_t out = conn->tickets_to_send + num;
+ POSIX_ENSURE(out <= UINT16_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ conn->tickets_to_send = out;
+
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_get_tickets_sent(struct s2n_connection *conn, uint16_t *num)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(num);
+ POSIX_ENSURE(conn->mode == S2N_SERVER, S2N_ERR_CLIENT_MODE);
+ *num = conn->tickets_sent;
+ return S2N_SUCCESS;
+}
+
+int s2n_connection_set_server_keying_material_lifetime(struct s2n_connection *conn, uint32_t lifetime_in_secs)
+{
+ POSIX_ENSURE_REF(conn);
+ conn->server_keying_material_lifetime = lifetime_in_secs;
+ return S2N_SUCCESS;
+}
+
+int s2n_config_set_session_ticket_cb(struct s2n_config *config, s2n_session_ticket_fn callback, void *ctx)
+{
+ POSIX_ENSURE_MUT(config);
+
+ config->session_ticket_cb = callback;
+ config->session_ticket_ctx = ctx;
+ return S2N_SUCCESS;
+}
+
+int s2n_session_ticket_get_data_len(struct s2n_session_ticket *ticket, size_t *data_len)
+{
+ POSIX_ENSURE_REF(ticket);
+ POSIX_ENSURE_MUT(data_len);
+
+ *data_len = ticket->ticket_data.size;
+ return S2N_SUCCESS;
+}
+
+int s2n_session_ticket_get_data(struct s2n_session_ticket *ticket, size_t max_data_len, uint8_t *data)
+{
+ POSIX_ENSURE_REF(ticket);
+ POSIX_ENSURE_MUT(data);
+
+ POSIX_ENSURE(ticket->ticket_data.size <= max_data_len, S2N_ERR_SERIALIZED_SESSION_STATE_TOO_LONG);
+ POSIX_CHECKED_MEMCPY(data, ticket->ticket_data.data, ticket->ticket_data.size);
+
+ return S2N_SUCCESS;
+}
+
+int s2n_session_ticket_get_lifetime(struct s2n_session_ticket *ticket, uint32_t *session_lifetime)
+{
+ POSIX_ENSURE_REF(ticket);
+ POSIX_ENSURE_REF(session_lifetime);
+
+ *session_lifetime = ticket->session_lifetime;
+
return S2N_SUCCESS;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_resume.h b/contrib/restricted/aws/s2n/tls/s2n_resume.h
index c58025e41f..2d104d6b4c 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_resume.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_resume.h
@@ -19,16 +19,22 @@
#include "stuffer/s2n_stuffer.h"
-#define S2N_SERIALIZED_FORMAT_VERSION 1
#define S2N_STATE_LIFETIME_IN_NANOS 54000000000000 /* 15 hours */
-#define S2N_STATE_SIZE_IN_BYTES (1 + 8 + 1 + S2N_TLS_CIPHER_SUITE_LEN + S2N_TLS_SECRET_LEN)
+#define S2N_TLS12_STATE_SIZE_IN_BYTES (1 + 8 + 1 + S2N_TLS_CIPHER_SUITE_LEN + S2N_TLS_SECRET_LEN + 1)
+#define S2N_TLS13_FIXED_STATE_SIZE 21
+#define S2N_TLS13_FIXED_EARLY_DATA_STATE_SIZE 3
+
#define S2N_TLS_SESSION_CACHE_TTL (6 * 60 * 60)
#define S2N_TICKET_KEY_NAME_LEN 16
#define S2N_TICKET_AAD_IMPLICIT_LEN 12
#define S2N_TICKET_AAD_LEN (S2N_TICKET_AAD_IMPLICIT_LEN + S2N_TICKET_KEY_NAME_LEN)
#define S2N_AES256_KEY_LEN 32
#define ONE_SEC_IN_NANOS 1000000000
-#define S2N_TICKET_SIZE_IN_BYTES (S2N_TICKET_KEY_NAME_LEN + S2N_TLS_GCM_IV_LEN + S2N_STATE_SIZE_IN_BYTES + S2N_TLS_GCM_TAG_LEN)
+#define ONE_MILLISEC_IN_NANOS 1000000
+#define ONE_WEEK_IN_SEC 604800
+#define S2N_TLS12_TICKET_SIZE_IN_BYTES (S2N_TICKET_KEY_NAME_LEN + S2N_TLS_GCM_IV_LEN + \
+ S2N_TLS12_STATE_SIZE_IN_BYTES + S2N_TLS_GCM_TAG_LEN)
+
#define S2N_TICKET_ENCRYPT_DECRYPT_KEY_LIFETIME_IN_NANOS 7200000000000 /* 2 hours */
#define S2N_TICKET_DECRYPT_KEY_LIFETIME_IN_NANOS 46800000000000 /* 13 hours */
#define S2N_STATE_FORMAT_LEN 1
@@ -37,6 +43,11 @@
#define S2N_GREATER_OR_EQUAL 1
#define S2N_LESS_THAN -1
+#define S2N_TLS12_SESSION_SIZE S2N_STATE_FORMAT_LEN + \
+ S2N_SESSION_TICKET_SIZE_LEN + \
+ S2N_TLS12_TICKET_SIZE_IN_BYTES + \
+ S2N_TLS12_STATE_SIZE_IN_BYTES
+
struct s2n_connection;
struct s2n_config;
@@ -52,9 +63,19 @@ struct s2n_ticket_key_weight {
uint8_t key_index;
};
+struct s2n_ticket_fields {
+ struct s2n_blob session_secret;
+ uint32_t ticket_age_add;
+};
+
+struct s2n_session_ticket {
+ struct s2n_blob ticket_data;
+ uint32_t session_lifetime;
+};
+
extern struct s2n_ticket_key *s2n_find_ticket_key(struct s2n_config *config, const uint8_t *name);
extern int s2n_encrypt_session_ticket(struct s2n_connection *conn, struct s2n_stuffer *to);
-extern int s2n_decrypt_session_ticket(struct s2n_connection *conn);
+extern int s2n_decrypt_session_ticket(struct s2n_connection *conn, struct s2n_stuffer *from);
extern int s2n_encrypt_session_cache(struct s2n_connection *conn, struct s2n_stuffer *to);
extern int s2n_decrypt_session_cache(struct s2n_connection *conn, struct s2n_stuffer *from);
extern int s2n_config_is_encrypt_decrypt_key_available(struct s2n_config *config);
@@ -67,6 +88,14 @@ typedef enum {
S2N_STATE_WITH_SESSION_TICKET
} s2n_client_tls_session_state_format;
+typedef enum {
+ S2N_SERIALIZED_FORMAT_TLS12_V1 = 1,
+ S2N_SERIALIZED_FORMAT_TLS13_V1,
+ S2N_SERIALIZED_FORMAT_TLS12_V2,
+ S2N_SERIALIZED_FORMAT_TLS12_V3,
+} s2n_serial_format_version;
+
extern int s2n_allowed_to_cache_connection(struct s2n_connection *conn);
extern int s2n_resume_from_cache(struct s2n_connection *conn);
-extern int s2n_store_to_cache(struct s2n_connection *conn);
+S2N_RESULT s2n_store_to_cache(struct s2n_connection *conn);
+S2N_RESULT s2n_connection_get_session_state_size(struct s2n_connection *conn, size_t *state_size);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_security_policies.c b/contrib/restricted/aws/s2n/tls/s2n_security_policies.c
index 388fb25b56..90c0cb16a3 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_security_policies.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_security_policies.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "tls/s2n_security_policies.h"
#include "tls/s2n_connection.h"
@@ -27,9 +27,9 @@ const struct s2n_security_policy security_policy_20170210 = {
.ecc_preferences = &s2n_ecc_preferences_20140601,
};
-const struct s2n_security_policy security_policy_20201110 = {
+const struct s2n_security_policy security_policy_default_tls13 = {
.minimum_protocol_version = S2N_TLS10,
- .cipher_preferences = &cipher_preferences_20190801,
+ .cipher_preferences = &cipher_preferences_20210831,
.kem_preferences = &kem_preferences_null,
.signature_preferences = &s2n_signature_preferences_20200207,
.certificate_signature_preferences = &s2n_certificate_signature_preferences_20201110,
@@ -70,6 +70,14 @@ const struct s2n_security_policy security_policy_20170405 = {
.ecc_preferences = &s2n_ecc_preferences_20140601,
};
+const struct s2n_security_policy security_policy_20170405_gcm = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_20170405_gcm,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20140601,
+};
+
const struct s2n_security_policy security_policy_elb_2015_04 = {
.minimum_protocol_version = S2N_TLS10,
.cipher_preferences = &elb_security_policy_2015_04,
@@ -281,6 +289,46 @@ const struct s2n_security_policy security_policy_cloudfront_tls_1_2_2019_legacy
.ecc_preferences = &s2n_ecc_preferences_20140601,
};
+const struct s2n_security_policy security_policy_aws_crt_sdk_ssl_v3 = {
+ .minimum_protocol_version = S2N_SSLv3,
+ .cipher_preferences = &cipher_preferences_aws_crt_sdk_ssl_v3,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_aws_crt_sdk_tls_10 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_aws_crt_sdk_default,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_aws_crt_sdk_tls_11 = {
+ .minimum_protocol_version = S2N_TLS11,
+ .cipher_preferences = &cipher_preferences_aws_crt_sdk_default,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_aws_crt_sdk_tls_12 = {
+ .minimum_protocol_version = S2N_TLS12,
+ .cipher_preferences = &cipher_preferences_aws_crt_sdk_default,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_aws_crt_sdk_tls_13 = {
+ .minimum_protocol_version = S2N_TLS13,
+ .cipher_preferences = &cipher_preferences_aws_crt_sdk_tls_13,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
const struct s2n_security_policy security_policy_kms_tls_1_0_2018_10 = {
.minimum_protocol_version = S2N_TLS10,
.cipher_preferences = &cipher_preferences_kms_tls_1_0_2018_10,
@@ -289,6 +337,14 @@ const struct s2n_security_policy security_policy_kms_tls_1_0_2018_10 = {
.ecc_preferences = &s2n_ecc_preferences_20140601,
};
+const struct s2n_security_policy security_policy_kms_tls_1_0_2021_08 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_kms_tls_1_0_2021_08,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
const struct s2n_security_policy security_policy_kms_pq_tls_1_0_2019_06 = {
.minimum_protocol_version = S2N_TLS10,
.cipher_preferences = &cipher_preferences_kms_pq_tls_1_0_2019_06,
@@ -337,6 +393,88 @@ const struct s2n_security_policy security_policy_pq_tls_1_0_2020_12 = {
.ecc_preferences = &s2n_ecc_preferences_20200310,
};
+const struct s2n_security_policy security_policy_pq_tls_1_1_2021_05_17 = {
+ .minimum_protocol_version = S2N_TLS11,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_1_2021_05_17,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_18 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_0_2021_05_18,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_19 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_0_2021_05_19,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_20 = {
+ .minimum_protocol_version = S2N_TLS10,
+ /* Yes, this is the same cipher_preferences as kms_pq_tls_1_0_2020_07. Both allow Kyber, BIKE, SIKE. The difference
+ * between these policies is the kem_preferences, which have been updated to prefer Round 3 over Round 2. */
+ .cipher_preferences = &cipher_preferences_kms_pq_tls_1_0_2020_07,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_1_2021_05_21 = {
+ .minimum_protocol_version = S2N_TLS11,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_1_2021_05_21,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_22 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_0_2021_05_22,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_23 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_0_2021_05_23,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_24 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_0_2021_05_24,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_25 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_0_2021_05_25,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_26 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_pq_tls_1_0_2021_05_26,
+ .kem_preferences = &kem_preferences_pq_tls_1_0_2021_05,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
const struct s2n_security_policy security_policy_kms_fips_tls_1_2_2018_10 = {
.minimum_protocol_version = S2N_TLS12,
.cipher_preferences = &cipher_preferences_kms_fips_tls_1_2_2018_10,
@@ -345,6 +483,14 @@ const struct s2n_security_policy security_policy_kms_fips_tls_1_2_2018_10 = {
.ecc_preferences = &s2n_ecc_preferences_20140601,
};
+const struct s2n_security_policy security_policy_kms_fips_tls_1_2_2021_08 = {
+ .minimum_protocol_version = S2N_TLS12,
+ .cipher_preferences = &cipher_preferences_kms_fips_tls_1_2_2021_08,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20140601,
+};
+
const struct s2n_security_policy security_policy_20140601 = {
.minimum_protocol_version = S2N_SSLv3,
.cipher_preferences = &cipher_preferences_20140601,
@@ -441,6 +587,30 @@ const struct s2n_security_policy security_policy_20190214 = {
.ecc_preferences = &s2n_ecc_preferences_20140601,
};
+const struct s2n_security_policy security_policy_20190214_gcm = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_20190214_gcm,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20140601,
+};
+
+const struct s2n_security_policy security_policy_20210825 = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_20210825,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
+const struct s2n_security_policy security_policy_20210825_gcm = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_20210825_gcm,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20200207,
+ .ecc_preferences = &s2n_ecc_preferences_20200310,
+};
+
const struct s2n_security_policy security_policy_20170328 = {
.minimum_protocol_version = S2N_TLS10,
.cipher_preferences = &cipher_preferences_20170328,
@@ -449,6 +619,14 @@ const struct s2n_security_policy security_policy_20170328 = {
.ecc_preferences = &s2n_ecc_preferences_20140601,
};
+const struct s2n_security_policy security_policy_20170328_gcm = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_20170328_gcm,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20140601,
+};
+
const struct s2n_security_policy security_policy_20170718 = {
.minimum_protocol_version = S2N_TLS10,
.cipher_preferences = &cipher_preferences_20170718,
@@ -457,6 +635,14 @@ const struct s2n_security_policy security_policy_20170718 = {
.ecc_preferences = &s2n_ecc_preferences_20140601,
};
+const struct s2n_security_policy security_policy_20170718_gcm = {
+ .minimum_protocol_version = S2N_TLS10,
+ .cipher_preferences = &cipher_preferences_20170718_gcm,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20140601,
+ .ecc_preferences = &s2n_ecc_preferences_20140601,
+};
+
const struct s2n_security_policy security_policy_20201021 = {
.minimum_protocol_version = S2N_TLS10,
.cipher_preferences = &cipher_preferences_20190122,
@@ -465,6 +651,22 @@ const struct s2n_security_policy security_policy_20201021 = {
.ecc_preferences = &s2n_ecc_preferences_20201021,
};
+const struct s2n_security_policy security_policy_20210816 = {
+ .minimum_protocol_version = S2N_TLS12,
+ .cipher_preferences = &cipher_preferences_20210816,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20210816,
+ .ecc_preferences = &s2n_ecc_preferences_20210816,
+};
+
+const struct s2n_security_policy security_policy_20210816_gcm = {
+ .minimum_protocol_version = S2N_TLS12,
+ .cipher_preferences = &cipher_preferences_20210816_gcm,
+ .kem_preferences = &kem_preferences_null,
+ .signature_preferences = &s2n_signature_preferences_20210816,
+ .ecc_preferences = &s2n_ecc_preferences_20210816,
+};
+
const struct s2n_security_policy security_policy_test_all = {
.minimum_protocol_version = S2N_SSLv3,
.cipher_preferences = &cipher_preferences_test_all,
@@ -531,7 +733,7 @@ const struct s2n_security_policy security_policy_null = {
struct s2n_security_policy_selection security_policy_selection[] = {
{ .version="default", .security_policy=&security_policy_20170210, .ecc_extension_required=0, .pq_kem_extension_required=0 },
- { .version="default_tls13", .security_policy=&security_policy_20201110, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="default_tls13", .security_policy=&security_policy_default_tls13, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="default_fips", .security_policy=&security_policy_20170405, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="ELBSecurityPolicy-TLS-1-0-2015-04", .security_policy=&security_policy_elb_2015_04, .ecc_extension_required=0, .pq_kem_extension_required=0 },
/* Not a mistake. TLS-1-0-2015-05 and 2016-08 are equivalent */
@@ -563,14 +765,32 @@ struct s2n_security_policy_selection security_policy_selection[] = {
{ .version="CloudFront-TLS-1-1-2016-Legacy", .security_policy=&security_policy_cloudfront_tls_1_1_2016_legacy, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="CloudFront-TLS-1-2-2018-Legacy", .security_policy=&security_policy_cloudfront_tls_1_2_2018_legacy, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="CloudFront-TLS-1-2-2019-Legacy", .security_policy=&security_policy_cloudfront_tls_1_2_2019_legacy, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="AWS-CRT-SDK-SSLv3.0", .security_policy=&security_policy_aws_crt_sdk_ssl_v3, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="AWS-CRT-SDK-TLSv1.0", .security_policy=&security_policy_aws_crt_sdk_tls_10, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="AWS-CRT-SDK-TLSv1.1", .security_policy=&security_policy_aws_crt_sdk_tls_11, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="AWS-CRT-SDK-TLSv1.2", .security_policy=&security_policy_aws_crt_sdk_tls_12, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="AWS-CRT-SDK-TLSv1.3", .security_policy=&security_policy_aws_crt_sdk_tls_13, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ /* KMS TLS Policies */
{ .version="KMS-TLS-1-0-2018-10", .security_policy=&security_policy_kms_tls_1_0_2018_10, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="KMS-TLS-1-0-2021-08", .security_policy=&security_policy_kms_tls_1_0_2021_08, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="KMS-FIPS-TLS-1-2-2018-10", .security_policy=&security_policy_kms_fips_tls_1_2_2018_10, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="KMS-FIPS-TLS-1-2-2021-08", .security_policy=&security_policy_kms_fips_tls_1_2_2021_08, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="KMS-PQ-TLS-1-0-2019-06", .security_policy=&security_policy_kms_pq_tls_1_0_2019_06, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="KMS-PQ-TLS-1-0-2020-02", .security_policy=&security_policy_kms_pq_tls_1_0_2020_02, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="KMS-PQ-TLS-1-0-2020-07", .security_policy=&security_policy_kms_pq_tls_1_0_2020_07, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="PQ-SIKE-TEST-TLS-1-0-2019-11", .security_policy=&security_policy_pq_sike_test_tls_1_0_2019_11, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="PQ-SIKE-TEST-TLS-1-0-2020-02", .security_policy=&security_policy_pq_sike_test_tls_1_0_2020_02, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="PQ-TLS-1-0-2020-12", .security_policy=&security_policy_pq_tls_1_0_2020_12, .ecc_extension_required=0, .pq_kem_extension_required=0 },
- { .version="KMS-FIPS-TLS-1-2-2018-10", .security_policy=&security_policy_kms_fips_tls_1_2_2018_10, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-1-2021-05-17", .security_policy=&security_policy_pq_tls_1_1_2021_05_17, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-0-2021-05-18", .security_policy=&security_policy_pq_tls_1_0_2021_05_18, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-0-2021-05-19", .security_policy=&security_policy_pq_tls_1_0_2021_05_19, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-0-2021-05-20", .security_policy=&security_policy_pq_tls_1_0_2021_05_20, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-1-2021-05-21", .security_policy=&security_policy_pq_tls_1_1_2021_05_21, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-0-2021-05-22", .security_policy=&security_policy_pq_tls_1_0_2021_05_22, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-0-2021-05-23", .security_policy=&security_policy_pq_tls_1_0_2021_05_23, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-0-2021-05-24", .security_policy=&security_policy_pq_tls_1_0_2021_05_24, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-0-2021-05-25", .security_policy=&security_policy_pq_tls_1_0_2021_05_25, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="PQ-TLS-1-0-2021-05-26", .security_policy=&security_policy_pq_tls_1_0_2021_05_26, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20140601", .security_policy=&security_policy_20140601, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20141001", .security_policy=&security_policy_20141001, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20150202", .security_policy=&security_policy_20150202, .ecc_extension_required=0, .pq_kem_extension_required=0 },
@@ -581,9 +801,15 @@ struct s2n_security_policy_selection security_policy_selection[] = {
{ .version="20160824", .security_policy=&security_policy_20160824, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20170210", .security_policy=&security_policy_20170210, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20170328", .security_policy=&security_policy_20170328, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="20170328_gcm", .security_policy=&security_policy_20170328_gcm, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20190214", .security_policy=&security_policy_20190214, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="20190214_gcm", .security_policy=&security_policy_20190214_gcm, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="20210825", .security_policy=&security_policy_20210825, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="20210825_gcm", .security_policy=&security_policy_20210825_gcm, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20170405", .security_policy=&security_policy_20170405, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="20170405_gcm", .security_policy=&security_policy_20170405_gcm, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20170718", .security_policy=&security_policy_20170718, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="20170718_gcm", .security_policy=&security_policy_20170718_gcm, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20190120", .security_policy=&security_policy_20190120, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20190121", .security_policy=&security_policy_20190121, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20190122", .security_policy=&security_policy_20190122, .ecc_extension_required=0, .pq_kem_extension_required=0 },
@@ -591,6 +817,8 @@ struct s2n_security_policy_selection security_policy_selection[] = {
{ .version="20190802", .security_policy=&security_policy_20190802, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20200207", .security_policy=&security_policy_test_all_tls13, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="20201021", .security_policy=&security_policy_20201021, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="20210816", .security_policy=&security_policy_20210816, .ecc_extension_required=0, .pq_kem_extension_required=0 },
+ { .version="20210816_GCM", .security_policy=&security_policy_20210816_gcm, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="test_all", .security_policy=&security_policy_test_all, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="test_all_fips", .security_policy=&security_policy_test_all_fips, .ecc_extension_required=0, .pq_kem_extension_required=0 },
{ .version="test_all_ecdsa", .security_policy=&security_policy_test_all_ecdsa, .ecc_extension_required=0, .pq_kem_extension_required=0 },
@@ -604,8 +832,8 @@ struct s2n_security_policy_selection security_policy_selection[] = {
int s2n_find_security_policy_from_version(const char *version, const struct s2n_security_policy **security_policy)
{
- notnull_check(version);
- notnull_check(security_policy);
+ POSIX_ENSURE_REF(version);
+ POSIX_ENSURE_REF(security_policy);
for (int i = 0; security_policy_selection[i].version != NULL; i++) {
if (!strcasecmp(version, security_policy_selection[i].version)) {
@@ -614,18 +842,21 @@ int s2n_find_security_policy_from_version(const char *version, const struct s2n_
}
}
- S2N_ERROR(S2N_ERR_INVALID_SECURITY_POLICY);
+ POSIX_BAIL(S2N_ERR_INVALID_SECURITY_POLICY);
}
int s2n_config_set_cipher_preferences(struct s2n_config *config, const char *version)
{
const struct s2n_security_policy *security_policy = NULL;
- GUARD(s2n_find_security_policy_from_version(version, &security_policy));
- ENSURE_POSIX_REF(security_policy);
- ENSURE_POSIX_REF(security_policy->cipher_preferences);
- ENSURE_POSIX_REF(security_policy->kem_preferences);
- ENSURE_POSIX_REF(security_policy->signature_preferences);
- ENSURE_POSIX_REF(security_policy->ecc_preferences);
+ POSIX_GUARD(s2n_find_security_policy_from_version(version, &security_policy));
+ POSIX_ENSURE_REF(security_policy);
+ POSIX_ENSURE_REF(security_policy->cipher_preferences);
+ POSIX_ENSURE_REF(security_policy->kem_preferences);
+ POSIX_ENSURE_REF(security_policy->signature_preferences);
+ POSIX_ENSURE_REF(security_policy->ecc_preferences);
+
+ /* If the security policy's minimum version is higher than what libcrypto supports, return an error. */
+ POSIX_ENSURE((security_policy->minimum_protocol_version <= s2n_get_highest_fully_supported_tls_version()), S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
config->security_policy = security_policy;
return 0;
@@ -634,12 +865,15 @@ int s2n_config_set_cipher_preferences(struct s2n_config *config, const char *ver
int s2n_connection_set_cipher_preferences(struct s2n_connection *conn, const char *version)
{
const struct s2n_security_policy *security_policy = NULL;
- GUARD(s2n_find_security_policy_from_version(version, &security_policy));
- ENSURE_POSIX_REF(security_policy);
- ENSURE_POSIX_REF(security_policy->cipher_preferences);
- ENSURE_POSIX_REF(security_policy->kem_preferences);
- ENSURE_POSIX_REF(security_policy->signature_preferences);
- ENSURE_POSIX_REF(security_policy->ecc_preferences);
+ POSIX_GUARD(s2n_find_security_policy_from_version(version, &security_policy));
+ POSIX_ENSURE_REF(security_policy);
+ POSIX_ENSURE_REF(security_policy->cipher_preferences);
+ POSIX_ENSURE_REF(security_policy->kem_preferences);
+ POSIX_ENSURE_REF(security_policy->signature_preferences);
+ POSIX_ENSURE_REF(security_policy->ecc_preferences);
+
+ /* If the security policy's minimum version is higher than what libcrypto supports, return an error. */
+ POSIX_ENSURE((security_policy->minimum_protocol_version <= s2n_get_highest_fully_supported_tls_version()), S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
conn->security_policy_override = security_policy;
return 0;
@@ -649,33 +883,30 @@ int s2n_security_policies_init()
{
for (int i = 0; security_policy_selection[i].version != NULL; i++) {
const struct s2n_security_policy *security_policy = security_policy_selection[i].security_policy;
- notnull_check(security_policy);
+ POSIX_ENSURE_REF(security_policy);
const struct s2n_cipher_preferences *cipher_preference = security_policy->cipher_preferences;
- notnull_check(cipher_preference);
+ POSIX_ENSURE_REF(cipher_preference);
const struct s2n_kem_preferences *kem_preference = security_policy->kem_preferences;
- notnull_check(kem_preference);
+ POSIX_ENSURE_REF(kem_preference);
const struct s2n_ecc_preferences *ecc_preference = security_policy->ecc_preferences;
- notnull_check(ecc_preference);
- GUARD(s2n_check_ecc_preferences_curves_list(ecc_preference));
+ POSIX_ENSURE_REF(ecc_preference);
+ POSIX_GUARD(s2n_check_ecc_preferences_curves_list(ecc_preference));
const struct s2n_signature_preferences *certificate_signature_preference = security_policy->certificate_signature_preferences;
if (certificate_signature_preference != NULL) {
- GUARD_AS_POSIX(s2n_validate_certificate_signature_preferences(certificate_signature_preference));
+ POSIX_GUARD_RESULT(s2n_validate_certificate_signature_preferences(certificate_signature_preference));
}
if (security_policy != &security_policy_null) {
- /* catch any offending security policy that does not support P-256 */
- S2N_ERROR_IF(!s2n_ecc_preferences_includes_curve(ecc_preference, TLS_EC_CURVE_SECP_256_R1), S2N_ERR_INVALID_SECURITY_POLICY);
+ /* All policies must have at least one ECC curve configured. */
+ S2N_ERROR_IF(ecc_preference->count == 0, S2N_ERR_INVALID_SECURITY_POLICY);
}
for (int j = 0; j < cipher_preference->count; j++) {
struct s2n_cipher_suite *cipher = cipher_preference->suites[j];
- notnull_check(cipher);
+ POSIX_ENSURE_REF(cipher);
- /* TLS1.3 does not include key exchange algorithms in its cipher suites,
- * but the elliptic curves extension is always required. */
if (cipher->minimum_required_tls_version >= S2N_TLS13) {
- security_policy_selection[i].ecc_extension_required = 1;
security_policy_selection[i].supports_tls13 = 1;
}
@@ -683,16 +914,16 @@ int s2n_security_policies_init()
S2N_ERROR_IF(s2n_is_valid_tls13_cipher(cipher->iana_value) ^
(cipher->minimum_required_tls_version >= S2N_TLS13), S2N_ERR_INVALID_SECURITY_POLICY);
- if (s2n_kex_includes(cipher->key_exchange_alg, &s2n_ecdhe)) {
+ if (s2n_cipher_suite_requires_ecc_extension(cipher)) {
security_policy_selection[i].ecc_extension_required = 1;
}
- if (s2n_kex_includes(cipher->key_exchange_alg, &s2n_kem)) {
+ if (s2n_cipher_suite_requires_pq_extension(cipher)) {
security_policy_selection[i].pq_kem_extension_required = 1;
}
}
- GUARD(s2n_validate_kem_preferences(kem_preference, security_policy_selection[i].pq_kem_extension_required));
+ POSIX_GUARD(s2n_validate_kem_preferences(kem_preference, security_policy_selection[i].pq_kem_extension_required));
}
return 0;
}
@@ -708,6 +939,18 @@ bool s2n_ecc_is_extension_required(const struct s2n_security_policy *security_po
return 1 == security_policy_selection[i].ecc_extension_required;
}
}
+
+ /* If the cipher preferences are not in the official list, compute the result directly */
+ const struct s2n_cipher_preferences *cipher_preferences = security_policy->cipher_preferences;
+ if (cipher_preferences == NULL) {
+ return false;
+ }
+ for (uint8_t i = 0; i < cipher_preferences->count; i++) {
+ if (s2n_cipher_suite_requires_ecc_extension(cipher_preferences->suites[i])) {
+ return true;
+ }
+ }
+
return false;
}
@@ -722,6 +965,17 @@ bool s2n_pq_kem_is_extension_required(const struct s2n_security_policy *security
return 1 == security_policy_selection[i].pq_kem_extension_required;
}
}
+
+ /* If the cipher preferences are not in the official list, compute the result directly */
+ const struct s2n_cipher_preferences *cipher_preferences = security_policy->cipher_preferences;
+ if (cipher_preferences == NULL) {
+ return false;
+ }
+ for (uint8_t i = 0; i < cipher_preferences->count; i++) {
+ if (s2n_cipher_suite_requires_pq_extension(cipher_preferences->suites[i])) {
+ return true;
+ }
+ }
return false;
}
@@ -747,7 +1001,7 @@ bool s2n_security_policy_supports_tls13(const struct s2n_security_policy *securi
}
for (uint8_t i = 0; i < cipher_preferences->count; i++) {
- if (s2n_is_valid_tls13_cipher(cipher_preferences->suites[i]->iana_value)) {
+ if (cipher_preferences->suites[i]->minimum_required_tls_version >= S2N_TLS13) {
return true;
}
}
@@ -757,13 +1011,13 @@ bool s2n_security_policy_supports_tls13(const struct s2n_security_policy *securi
int s2n_connection_is_valid_for_cipher_preferences(struct s2n_connection *conn, const char *version)
{
- notnull_check(conn);
- notnull_check(version);
- notnull_check(conn->secure.cipher_suite);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(version);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
const struct s2n_security_policy *security_policy = NULL;
- GUARD(s2n_find_security_policy_from_version(version, &security_policy));
- notnull_check(security_policy);
+ POSIX_GUARD(s2n_find_security_policy_from_version(version, &security_policy));
+ POSIX_ENSURE_REF(security_policy);
/* make sure we don't use a TLS version lower than the minimum required by the configured policy */
if (s2n_connection_get_actual_protocol_version(conn) < security_policy->minimum_protocol_version) {
@@ -771,7 +1025,7 @@ int s2n_connection_is_valid_for_cipher_preferences(struct s2n_connection *conn,
}
struct s2n_cipher_suite *cipher = conn->secure.cipher_suite;
- notnull_check(cipher);
+ POSIX_ENSURE_REF(cipher);
for (int i = 0; i < security_policy->cipher_preferences->count; ++i) {
if (0 == memcmp(security_policy->cipher_preferences->suites[i]->iana_value, cipher->iana_value, S2N_TLS_CIPHER_SUITE_LEN)) {
return 1;
@@ -782,21 +1036,22 @@ int s2n_connection_is_valid_for_cipher_preferences(struct s2n_connection *conn,
}
int s2n_validate_kem_preferences(const struct s2n_kem_preferences *kem_preferences, bool pq_kem_extension_required) {
- notnull_check(kem_preferences);
+ POSIX_ENSURE_REF(kem_preferences);
/* Basic sanity checks to assert that the count is 0 if and only if the associated list is NULL */
- ENSURE_POSIX(S2N_IFF(kem_preferences->tls13_kem_group_count == 0, kem_preferences->tls13_kem_groups == NULL),
+ POSIX_ENSURE(S2N_IFF(kem_preferences->tls13_kem_group_count == 0, kem_preferences->tls13_kem_groups == NULL),
S2N_ERR_INVALID_SECURITY_POLICY);
- ENSURE_POSIX(S2N_IFF(kem_preferences->kem_count == 0, kem_preferences->kems == NULL),
+ POSIX_ENSURE(S2N_IFF(kem_preferences->kem_count == 0, kem_preferences->kems == NULL),
S2N_ERR_INVALID_SECURITY_POLICY);
+ POSIX_ENSURE(kem_preferences->tls13_kem_group_count <= S2N_SUPPORTED_KEM_GROUPS_COUNT, S2N_ERR_ARRAY_INDEX_OOB);
/* The PQ KEM extension is applicable only to TLS 1.2 */
if (pq_kem_extension_required) {
- ENSURE_POSIX(kem_preferences->kem_count > 0, S2N_ERR_INVALID_SECURITY_POLICY);
- ENSURE_POSIX(kem_preferences->kems != NULL, S2N_ERR_INVALID_SECURITY_POLICY);
+ POSIX_ENSURE(kem_preferences->kem_count > 0, S2N_ERR_INVALID_SECURITY_POLICY);
+ POSIX_ENSURE(kem_preferences->kems != NULL, S2N_ERR_INVALID_SECURITY_POLICY);
} else {
- ENSURE_POSIX(kem_preferences->kem_count == 0, S2N_ERR_INVALID_SECURITY_POLICY);
- ENSURE_POSIX(kem_preferences->kems == NULL, S2N_ERR_INVALID_SECURITY_POLICY);
+ POSIX_ENSURE(kem_preferences->kem_count == 0, S2N_ERR_INVALID_SECURITY_POLICY);
+ POSIX_ENSURE(kem_preferences->kems == NULL, S2N_ERR_INVALID_SECURITY_POLICY);
}
return S2N_SUCCESS;
@@ -804,7 +1059,7 @@ int s2n_validate_kem_preferences(const struct s2n_kem_preferences *kem_preferenc
S2N_RESULT s2n_validate_certificate_signature_preferences(const struct s2n_signature_preferences *certificate_signature_preferences)
{
- ENSURE_REF(certificate_signature_preferences);
+ RESULT_ENSURE_REF(certificate_signature_preferences);
size_t rsa_pss_scheme_count = 0;
@@ -817,6 +1072,6 @@ S2N_RESULT s2n_validate_certificate_signature_preferences(const struct s2n_signa
/* The OpenSSL function used to parse signatures off certificates does not differentiate between RSA-PSS
* signature schemes. Therefore a security policy with a certificate signature preferences list must include
* all rsa_pss signature schemes. */
- ENSURE(rsa_pss_scheme_count == NUM_RSA_PSS_SCHEMES || rsa_pss_scheme_count == 0, S2N_ERR_INVALID_SECURITY_POLICY);
+ RESULT_ENSURE(rsa_pss_scheme_count == NUM_RSA_PSS_SCHEMES || rsa_pss_scheme_count == 0, S2N_ERR_INVALID_SECURITY_POLICY);
return S2N_RESULT_OK;
}
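As a usage sketch (not part of the diff), any of the policy names registered in security_policy_selection[] above is selected through the existing configuration call, and the new libcrypto check means an unsupported minimum protocol version is now rejected at configuration time rather than during the handshake:

#include <stdio.h>
#include "api/s2n.h"

static struct s2n_config *make_pq_config(void)
{
    struct s2n_config *config = s2n_config_new();
    if (config == NULL) {
        return NULL;
    }
    /* Names are matched case-insensitively against security_policy_selection[]. */
    if (s2n_config_set_cipher_preferences(config, "PQ-TLS-1-0-2021-05-26") != S2N_SUCCESS) {
        fprintf(stderr, "policy rejected: %s\n", s2n_strerror(s2n_errno, "EN"));
        s2n_config_free(config);
        return NULL;
    }
    return config;
}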
diff --git a/contrib/restricted/aws/s2n/tls/s2n_security_policies.h b/contrib/restricted/aws/s2n/tls/s2n_security_policies.h
index f724eb1934..be28fb584a 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_security_policies.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_security_policies.h
@@ -53,12 +53,16 @@ extern const struct s2n_security_policy security_policy_20160804;
extern const struct s2n_security_policy security_policy_20160824;
extern const struct s2n_security_policy security_policy_20170210;
extern const struct s2n_security_policy security_policy_20170328;
+extern const struct s2n_security_policy security_policy_20170328_gcm;
extern const struct s2n_security_policy security_policy_20170405;
+extern const struct s2n_security_policy security_policy_20170405_gcm;
extern const struct s2n_security_policy security_policy_20170718;
+extern const struct s2n_security_policy security_policy_20170718_gcm;
extern const struct s2n_security_policy security_policy_20190214;
+extern const struct s2n_security_policy security_policy_20190214_gcm;
extern const struct s2n_security_policy security_policy_20190801;
extern const struct s2n_security_policy security_policy_20190802;
-extern const struct s2n_security_policy security_policy_20201110;
+extern const struct s2n_security_policy security_policy_default_tls13;
extern const struct s2n_security_policy security_policy_test_all;
extern const struct s2n_security_policy security_policy_test_all_tls12;
@@ -79,12 +83,28 @@ extern const struct s2n_security_policy security_policy_elb_fs_1_2_2019_08;
extern const struct s2n_security_policy security_policy_elb_fs_1_1_2019_08;
extern const struct s2n_security_policy security_policy_elb_fs_1_2_res_2019_08;
+extern const struct s2n_security_policy security_policy_aws_crt_sdk_ssl_v3;
+extern const struct s2n_security_policy security_policy_aws_crt_sdk_tls_10;
+extern const struct s2n_security_policy security_policy_aws_crt_sdk_tls_11;
+extern const struct s2n_security_policy security_policy_aws_crt_sdk_tls_12;
+extern const struct s2n_security_policy security_policy_aws_crt_sdk_tls_13;
+
extern const struct s2n_security_policy security_policy_kms_pq_tls_1_0_2019_06;
extern const struct s2n_security_policy security_policy_kms_pq_tls_1_0_2020_02;
extern const struct s2n_security_policy security_policy_kms_pq_tls_1_0_2020_07;
extern const struct s2n_security_policy security_policy_pq_sike_test_tls_1_0_2019_11;
extern const struct s2n_security_policy security_policy_pq_sike_test_tls_1_0_2020_02;
extern const struct s2n_security_policy security_policy_pq_tls_1_0_2020_12;
+extern const struct s2n_security_policy security_policy_pq_tls_1_1_2021_05_17;
+extern const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_18;
+extern const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_19;
+extern const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_20;
+extern const struct s2n_security_policy security_policy_pq_tls_1_1_2021_05_21;
+extern const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_22;
+extern const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_23;
+extern const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_24;
+extern const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_25;
+extern const struct s2n_security_policy security_policy_pq_tls_1_0_2021_05_26;
extern const struct s2n_security_policy security_policy_cloudfront_upstream;
extern const struct s2n_security_policy security_policy_cloudfront_upstream_tls10;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_send.c b/contrib/restricted/aws/s2n/tls/s2n_send.c
index 4f59054845..013fa28b66 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_send.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_send.c
@@ -15,7 +15,7 @@
#include <sys/param.h>
#include <errno.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
@@ -45,9 +45,9 @@ int s2n_flush(struct s2n_connection *conn, s2n_blocked_status * blocked)
w = s2n_connection_send_stuffer(&conn->out, conn, s2n_stuffer_data_available(&conn->out));
if (w < 0) {
if (errno == EWOULDBLOCK || errno == EAGAIN) {
- S2N_ERROR(S2N_ERR_IO_BLOCKED);
+ POSIX_BAIL(S2N_ERR_IO_BLOCKED);
}
- S2N_ERROR(S2N_ERR_IO);
+ POSIX_BAIL(S2N_ERR_IO);
}
conn->wire_bytes_out += w;
}
@@ -55,15 +55,15 @@ int s2n_flush(struct s2n_connection *conn, s2n_blocked_status * blocked)
if (conn->closing) {
conn->closed = 1;
}
- GUARD(s2n_stuffer_rewrite(&conn->out));
+ POSIX_GUARD(s2n_stuffer_rewrite(&conn->out));
/* If there's an alert pending out, send that */
if (s2n_stuffer_data_available(&conn->reader_alert_out) == 2) {
struct s2n_blob alert = {0};
alert.data = conn->reader_alert_out.blob.data;
alert.size = 2;
- GUARD(s2n_record_write(conn, TLS_ALERT, &alert));
- GUARD(s2n_stuffer_rewrite(&conn->reader_alert_out));
+ POSIX_GUARD(s2n_record_write(conn, TLS_ALERT, &alert));
+ POSIX_GUARD(s2n_stuffer_rewrite(&conn->reader_alert_out));
conn->closing = 1;
/* Actually write it ... */
@@ -75,8 +75,8 @@ int s2n_flush(struct s2n_connection *conn, s2n_blocked_status * blocked)
struct s2n_blob alert = {0};
alert.data = conn->writer_alert_out.blob.data;
alert.size = 2;
- GUARD(s2n_record_write(conn, TLS_ALERT, &alert));
- GUARD(s2n_stuffer_rewrite(&conn->writer_alert_out));
+ POSIX_GUARD(s2n_record_write(conn, TLS_ALERT, &alert));
+ POSIX_GUARD(s2n_stuffer_rewrite(&conn->writer_alert_out));
conn->closing = 1;
/* Actually write it ... */
@@ -92,11 +92,11 @@ ssize_t s2n_sendv_with_offset_impl(struct s2n_connection *conn, const struct iov
{
ssize_t user_data_sent, total_size = 0;
- S2N_ERROR_IF(conn->closed, S2N_ERR_CLOSED);
- S2N_ERROR_IF(conn->config->quic_enabled, S2N_ERR_UNSUPPORTED_WITH_QUIC);
+ POSIX_ENSURE(!conn->closed, S2N_ERR_CLOSED);
+ POSIX_ENSURE(!s2n_connection_is_quic_enabled(conn), S2N_ERR_UNSUPPORTED_WITH_QUIC);
/* Flush any pending I/O */
- GUARD(s2n_flush(conn, blocked));
+ POSIX_GUARD(s2n_flush(conn, blocked));
/* Acknowledge consumed and flushed user data as sent */
user_data_sent = conn->current_user_data_consumed;
@@ -104,7 +104,7 @@ ssize_t s2n_sendv_with_offset_impl(struct s2n_connection *conn, const struct iov
*blocked = S2N_BLOCKED_ON_WRITE;
uint16_t max_payload_size = 0;
- GUARD_AS_POSIX(s2n_record_max_write_payload_size(conn, &max_payload_size));
+ POSIX_GUARD_RESULT(s2n_record_max_write_payload_size(conn, &max_payload_size));
/* TLS 1.0 and SSLv3 are vulnerable to the so-called BEAST attack. Work
* around this by splitting messages into one-byte records, and then
@@ -129,15 +129,16 @@ ssize_t s2n_sendv_with_offset_impl(struct s2n_connection *conn, const struct iov
bufs = _bufs;
count = _count;
}
- for (int i = 0; i < count; i++) {
+ for (ssize_t i = 0; i < count; i++) {
total_size += bufs[i].iov_len;
}
total_size -= offs;
S2N_ERROR_IF(conn->current_user_data_consumed > total_size, S2N_ERR_SEND_SIZE);
+ POSIX_GUARD_RESULT(s2n_early_data_validate_send(conn, total_size));
if (conn->dynamic_record_timeout_threshold > 0) {
uint64_t elapsed;
- GUARD_AS_POSIX(s2n_timer_elapsed(conn->config, &conn->write_timer, &elapsed));
+ POSIX_GUARD_RESULT(s2n_timer_elapsed(conn->config, &conn->write_timer, &elapsed));
/* Reset record size back to a single segment after threshold seconds of inactivity */
if (elapsed - conn->last_write_elapsed > (uint64_t) conn->dynamic_record_timeout_threshold * 1000000000) {
conn->active_application_bytes_consumed = 0;
@@ -154,7 +155,7 @@ ssize_t s2n_sendv_with_offset_impl(struct s2n_connection *conn, const struct iov
*/
if (conn->active_application_bytes_consumed < (uint64_t) conn->dynamic_record_resize_threshold) {
uint16_t min_payload_size = 0;
- GUARD_AS_POSIX(s2n_record_min_write_payload_size(conn, &min_payload_size));
+ POSIX_GUARD_RESULT(s2n_record_min_write_payload_size(conn, &min_payload_size));
to_write = MIN(min_payload_size, to_write);
}
@@ -168,12 +169,12 @@ ssize_t s2n_sendv_with_offset_impl(struct s2n_connection *conn, const struct iov
}
}
- GUARD(s2n_stuffer_rewrite(&conn->out));
+ POSIX_GUARD(s2n_stuffer_rewrite(&conn->out));
- GUARD(s2n_post_handshake_send(conn, blocked));
+ POSIX_GUARD(s2n_post_handshake_send(conn, blocked));
/* Write and encrypt the record */
- GUARD(s2n_record_writev(conn, TLS_APPLICATION_DATA, bufs, count,
+ POSIX_GUARD(s2n_record_writev(conn, TLS_APPLICATION_DATA, bufs, count,
conn->current_user_data_consumed + offs, to_write));
conn->current_user_data_consumed += to_write;
conn->active_application_bytes_consumed += to_write;
@@ -200,12 +201,13 @@ ssize_t s2n_sendv_with_offset_impl(struct s2n_connection *conn, const struct iov
*blocked = S2N_NOT_BLOCKED;
+ POSIX_GUARD_RESULT(s2n_early_data_record_bytes(conn, total_size));
return total_size;
}
ssize_t s2n_sendv_with_offset(struct s2n_connection *conn, const struct iovec *bufs, ssize_t count, ssize_t offs, s2n_blocked_status *blocked)
{
- ENSURE_POSIX(!conn->send_in_use, S2N_ERR_REENTRANCY);
+ POSIX_ENSURE(!conn->send_in_use, S2N_ERR_REENTRANCY);
conn->send_in_use = true;
ssize_t result = s2n_sendv_with_offset_impl(conn, bufs, count, offs, blocked);
conn->send_in_use = false;
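For context (not part of the diff), the reentrancy guard and blocked-status handling above are what an application-level send loop builds on; a minimal sketch, assuming a non-blocking socket behind the connection:

#include <sys/types.h>
#include <stddef.h>
#include "api/s2n.h"

static int send_all(struct s2n_connection *conn, const uint8_t *data, size_t len)
{
    size_t written = 0;
    s2n_blocked_status blocked = S2N_NOT_BLOCKED;
    while (written < len) {
        ssize_t w = s2n_send(conn, data + written, (ssize_t) (len - written), &blocked);
        if (w >= 0) {
            written += (size_t) w;
        } else if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_BLOCKED) {
            /* In a real application, wait for the socket to become writable
             * (poll/epoll) before retrying instead of spinning. */
            continue;
        } else {
            return -1;
        }
    }
    return 0;
}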
diff --git a/contrib/restricted/aws/s2n/tls/s2n_server_cert.c b/contrib/restricted/aws/s2n/tls/s2n_server_cert.c
index 0188505ae1..6be1ac6202 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_server_cert.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_server_cert.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
@@ -27,30 +27,30 @@ int s2n_server_cert_recv(struct s2n_connection *conn)
{
if (conn->actual_protocol_version == S2N_TLS13) {
uint8_t certificate_request_context_len;
- GUARD(s2n_stuffer_read_uint8(&conn->handshake.io, &certificate_request_context_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(&conn->handshake.io, &certificate_request_context_len));
S2N_ERROR_IF(certificate_request_context_len != 0, S2N_ERR_BAD_MESSAGE);
}
uint32_t size_of_all_certificates;
- GUARD(s2n_stuffer_read_uint24(&conn->handshake.io, &size_of_all_certificates));
+ POSIX_GUARD(s2n_stuffer_read_uint24(&conn->handshake.io, &size_of_all_certificates));
S2N_ERROR_IF(size_of_all_certificates > s2n_stuffer_data_available(&conn->handshake.io) || size_of_all_certificates < 3, S2N_ERR_BAD_MESSAGE);
s2n_cert_public_key public_key;
- GUARD(s2n_pkey_zero_init(&public_key));
+ POSIX_GUARD(s2n_pkey_zero_init(&public_key));
s2n_pkey_type actual_cert_pkey_type;
struct s2n_blob cert_chain = {0};
cert_chain.size = size_of_all_certificates;
cert_chain.data = s2n_stuffer_raw_read(&conn->handshake.io, size_of_all_certificates);
- notnull_check(cert_chain.data);
+ POSIX_ENSURE_REF(cert_chain.data);
- GUARD(s2n_x509_validator_validate_cert_chain(&conn->x509_validator, conn, cert_chain.data,
- cert_chain.size, &actual_cert_pkey_type, &public_key));
+ POSIX_ENSURE(s2n_x509_validator_validate_cert_chain(&conn->x509_validator, conn, cert_chain.data,
+ cert_chain.size, &actual_cert_pkey_type, &public_key) == S2N_CERT_OK, S2N_ERR_CERT_UNTRUSTED);
- GUARD(s2n_is_cert_type_valid_for_auth(conn, actual_cert_pkey_type));
- GUARD(s2n_pkey_setup_for_type(&public_key, actual_cert_pkey_type));
- conn->secure.server_public_key = public_key;
+ POSIX_GUARD(s2n_is_cert_type_valid_for_auth(conn, actual_cert_pkey_type));
+ POSIX_GUARD(s2n_pkey_setup_for_type(&public_key, actual_cert_pkey_type));
+ conn->handshake_params.server_public_key = public_key;
return 0;
}
@@ -62,10 +62,10 @@ int s2n_server_cert_send(struct s2n_connection *conn)
/* server's certificate request context should always be of zero length */
/* https://tools.ietf.org/html/rfc8446#section-4.4.2 */
uint8_t certificate_request_context_len = 0;
- GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, certificate_request_context_len));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, certificate_request_context_len));
}
- GUARD(s2n_send_cert_chain(conn, &conn->handshake.io, conn->handshake_params.our_chain_and_key));
+ POSIX_GUARD(s2n_send_cert_chain(conn, &conn->handshake.io, conn->handshake_params.our_chain_and_key));
return 0;
}
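Related usage note (not part of the diff): the tighter mapping of chain-validation failures to S2N_ERR_CERT_UNTRUSTED above only comes into play when certificate validation is enabled on the client. Wiring up a trust store typically looks like the sketch below, where "ca.pem" is a placeholder path rather than anything defined by this change:

#include "api/s2n.h"

/* Sketch: point the client config at a CA bundle so that
 * s2n_x509_validator_validate_cert_chain() has roots to validate against. */
static int enable_cert_validation(struct s2n_config *config)
{
    if (s2n_config_set_verification_ca_location(config, "ca.pem", NULL) != S2N_SUCCESS) {
        return -1;
    }
    return 0;
}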
diff --git a/contrib/restricted/aws/s2n/tls/s2n_server_cert_request.c b/contrib/restricted/aws/s2n/tls/s2n_server_cert_request.c
index 26dcecc56c..45790f0645 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_server_cert_request.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_server_cert_request.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "crypto/s2n_certificate.h"
#include "error/s2n_errno.h"
@@ -60,26 +60,13 @@ static uint8_t s2n_cert_type_preference_list_legacy_dss[] = {
S2N_CERT_TYPE_ECDSA_SIGN
};
-static int s2n_cert_type_to_pkey_type(s2n_cert_type cert_type_in, s2n_pkey_type *pkey_type_out) {
- switch(cert_type_in) {
- case S2N_CERT_TYPE_RSA_SIGN:
- *pkey_type_out = S2N_PKEY_TYPE_RSA;
- return 0;
- case S2N_CERT_TYPE_ECDSA_SIGN:
- *pkey_type_out = S2N_PKEY_TYPE_ECDSA;
- return 0;
- default:
- S2N_ERROR(S2N_CERT_ERR_TYPE_UNSUPPORTED);
- }
-}
-
static int s2n_recv_client_cert_preferences(struct s2n_stuffer *in, s2n_cert_type *chosen_cert_type_out)
{
uint8_t cert_types_len;
- GUARD(s2n_stuffer_read_uint8(in, &cert_types_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &cert_types_len));
uint8_t *their_cert_type_pref_list = s2n_stuffer_raw_read(in, cert_types_len);
- notnull_check(their_cert_type_pref_list);
+ POSIX_ENSURE_REF(their_cert_type_pref_list);
/* Iterate through our preference list from most to least preferred, and return the first match that we find. */
for (int our_cert_pref_idx = 0; our_cert_pref_idx < sizeof(s2n_cert_type_preference_list); our_cert_pref_idx++) {
@@ -91,18 +78,19 @@ static int s2n_recv_client_cert_preferences(struct s2n_stuffer *in, s2n_cert_typ
}
}
- S2N_ERROR(S2N_ERR_CERT_TYPE_UNSUPPORTED);
+ POSIX_BAIL(S2N_ERR_CERT_TYPE_UNSUPPORTED);
}
static int s2n_set_cert_chain_as_client(struct s2n_connection *conn)
{
if (s2n_config_get_num_default_certs(conn->config) > 0) {
- GUARD(s2n_choose_sig_scheme_from_peer_preference_list(conn, &conn->handshake_params.server_sig_hash_algs,
- &conn->secure.client_cert_sig_scheme));
+ POSIX_GUARD(s2n_choose_sig_scheme_from_peer_preference_list(conn, &conn->handshake_params.server_sig_hash_algs,
+ &conn->handshake_params.client_cert_sig_scheme));
struct s2n_cert_chain_and_key *cert = s2n_config_get_single_default_cert(conn->config);
- notnull_check(cert);
+ POSIX_ENSURE_REF(cert);
conn->handshake_params.our_chain_and_key = cert;
+ conn->handshake_params.client_cert_pkey_type = s2n_cert_chain_and_key_get_pkey_type(cert);
}
return 0;
@@ -114,13 +102,13 @@ int s2n_tls13_cert_req_recv(struct s2n_connection *conn)
/* read request context length */
uint8_t request_context_length;
- GUARD(s2n_stuffer_read_uint8(in, &request_context_length));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &request_context_length));
/* RFC 8446: This field SHALL be zero length unless used for the post-handshake authentication */
S2N_ERROR_IF(request_context_length != 0, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_extension_list_recv(S2N_EXTENSION_LIST_CERT_REQ, conn, in));
+ POSIX_GUARD(s2n_extension_list_recv(S2N_EXTENSION_LIST_CERT_REQ, conn, in));
- GUARD(s2n_set_cert_chain_as_client(conn));
+ POSIX_GUARD(s2n_set_cert_chain_as_client(conn));
return S2N_SUCCESS;
}
@@ -130,26 +118,25 @@ int s2n_cert_req_recv(struct s2n_connection *conn)
struct s2n_stuffer *in = &conn->handshake.io;
s2n_cert_type cert_type = 0;
- GUARD(s2n_recv_client_cert_preferences(in, &cert_type));
- GUARD(s2n_cert_type_to_pkey_type(cert_type, &conn->secure.client_cert_pkey_type));
+ POSIX_GUARD(s2n_recv_client_cert_preferences(in, &cert_type));
if (conn->actual_protocol_version == S2N_TLS12) {
- GUARD(s2n_recv_supported_sig_scheme_list(in, &conn->handshake_params.server_sig_hash_algs));
+ POSIX_GUARD(s2n_recv_supported_sig_scheme_list(in, &conn->handshake_params.server_sig_hash_algs));
}
uint16_t cert_authorities_len = 0;
- GUARD(s2n_stuffer_read_uint16(in, &cert_authorities_len));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &cert_authorities_len));
/* For now we don't parse X.501 encoded CA Distinguished Names.
* Don't fail just yet, as we may still succeed if we provide the
* right certificate or if ClientAuth is optional. */
- GUARD(s2n_stuffer_skip_read(in, cert_authorities_len));
+ POSIX_GUARD(s2n_stuffer_skip_read(in, cert_authorities_len));
/* In the future we may have more advanced logic to match a set of configured certificates against
* the cert authorities extension and the signature algorithms advertised.
* For now, this will just set the only certificate configured.
*/
- GUARD(s2n_set_cert_chain_as_client(conn));
+ POSIX_GUARD(s2n_set_cert_chain_as_client(conn));
return 0;
}
@@ -159,9 +146,9 @@ int s2n_tls13_cert_req_send(struct s2n_connection *conn)
struct s2n_stuffer *out = &conn->handshake.io;
/* Write 0 length request context https://tools.ietf.org/html/rfc8446#section-4.3.2 */
- GUARD(s2n_stuffer_write_uint8(out, 0));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, 0));
- GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_CERT_REQ, conn, out));
+ POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_CERT_REQ, conn, out));
return S2N_SUCCESS;
}
@@ -174,24 +161,24 @@ int s2n_cert_req_send(struct s2n_connection *conn)
if (conn->config->cert_req_dss_legacy_compat_enabled) {
client_cert_preference_list_size = sizeof(s2n_cert_type_preference_list_legacy_dss);
}
- GUARD(s2n_stuffer_write_uint8(out, client_cert_preference_list_size));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, client_cert_preference_list_size));
for (int i = 0; i < client_cert_preference_list_size; i++) {
if (conn->config->cert_req_dss_legacy_compat_enabled) {
- GUARD(s2n_stuffer_write_uint8(out, s2n_cert_type_preference_list_legacy_dss[i]));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, s2n_cert_type_preference_list_legacy_dss[i]));
} else {
- GUARD(s2n_stuffer_write_uint8(out, s2n_cert_type_preference_list[i]));
+ POSIX_GUARD(s2n_stuffer_write_uint8(out, s2n_cert_type_preference_list[i]));
}
}
if (conn->actual_protocol_version == S2N_TLS12) {
- GUARD(s2n_send_supported_sig_scheme_list(conn, out));
+ POSIX_GUARD(s2n_send_supported_sig_scheme_list(conn, out));
}
/* RFC 5246 7.4.4 - If the certificate_authorities list is empty, then the
* client MAY send any certificate of the appropriate ClientCertificateType */
uint16_t acceptable_cert_authorities_len = 0;
- GUARD(s2n_stuffer_write_uint16(out, acceptable_cert_authorities_len));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, acceptable_cert_authorities_len));
return 0;
}
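The preference loop in s2n_recv_client_cert_preferences() above follows a first-match-wins pattern over an ordered list; as a standalone illustration (a hypothetical helper, not library code), the same idea reads:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical helper: pick the first entry of `ours` (ordered most- to
 * least-preferred) that also appears in `theirs`. Mirrors the loop in
 * s2n_recv_client_cert_preferences(), but is not part of the library. */
static bool pick_first_match(const uint8_t *ours, size_t ours_len,
                             const uint8_t *theirs, size_t theirs_len,
                             uint8_t *chosen)
{
    for (size_t i = 0; i < ours_len; i++) {
        for (size_t j = 0; j < theirs_len; j++) {
            if (ours[i] == theirs[j]) {
                *chosen = ours[i];
                return true;
            }
        }
    }
    return false;
}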
diff --git a/contrib/restricted/aws/s2n/tls/s2n_server_extensions.c b/contrib/restricted/aws/s2n/tls/s2n_server_extensions.c
index 0fc8f6bb15..b604556009 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_server_extensions.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_server_extensions.c
@@ -29,10 +29,12 @@ int s2n_server_extensions_send(struct s2n_connection *conn, struct s2n_stuffer *
{
uint32_t data_available_before_extensions = s2n_stuffer_data_available(out);
- if (conn->actual_protocol_version >= S2N_TLS13) {
- GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_SERVER_HELLO_TLS13, conn, out));
+ if (s2n_is_hello_retry_message(conn)) {
+ POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_HELLO_RETRY_REQUEST, conn, out));
+ } else if (conn->actual_protocol_version >= S2N_TLS13) {
+ POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_SERVER_HELLO_TLS13, conn, out));
} else {
- GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_SERVER_HELLO_DEFAULT, conn, out));
+ POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_SERVER_HELLO_DEFAULT, conn, out));
}
/* The ServerHello extension list size (uint16_t) is NOT written if the list is empty.
@@ -45,7 +47,7 @@ int s2n_server_extensions_send(struct s2n_connection *conn, struct s2n_stuffer *
* so will never produce an empty list.
*/
if(s2n_stuffer_data_available(out) - data_available_before_extensions == S2N_EMPTY_EXTENSION_LIST_SIZE) {
- GUARD(s2n_stuffer_wipe_n(out, S2N_EMPTY_EXTENSION_LIST_SIZE));
+ POSIX_GUARD(s2n_stuffer_wipe_n(out, S2N_EMPTY_EXTENSION_LIST_SIZE));
}
return S2N_SUCCESS;
@@ -54,18 +56,20 @@ int s2n_server_extensions_send(struct s2n_connection *conn, struct s2n_stuffer *
int s2n_server_extensions_recv(struct s2n_connection *conn, struct s2n_stuffer *in)
{
s2n_parsed_extensions_list parsed_extension_list = { 0 };
- GUARD(s2n_extension_list_parse(in, &parsed_extension_list));
+ POSIX_GUARD(s2n_extension_list_parse(in, &parsed_extension_list));
/* Process supported_versions first so that we know which extensions list to use.
* - If the supported_versions extension exists, then it will set server_protocol_version.
* - If the supported_versions extension does not exist, then the server_protocol_version will remain
* unknown and we will use the default list of allowed extension types. */
- GUARD(s2n_extension_process(&s2n_server_supported_versions_extension, conn, &parsed_extension_list));
+ POSIX_GUARD(s2n_extension_process(&s2n_server_supported_versions_extension, conn, &parsed_extension_list));
- if (conn->server_protocol_version >= S2N_TLS13) {
- GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_SERVER_HELLO_TLS13, conn, &parsed_extension_list));
+ if (s2n_is_hello_retry_message(conn)) {
+ POSIX_GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_HELLO_RETRY_REQUEST, conn, &parsed_extension_list));
+ } else if (conn->server_protocol_version >= S2N_TLS13) {
+ POSIX_GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_SERVER_HELLO_TLS13, conn, &parsed_extension_list));
} else {
- GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_SERVER_HELLO_DEFAULT, conn, &parsed_extension_list));
+ POSIX_GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_SERVER_HELLO_DEFAULT, conn, &parsed_extension_list));
}
return S2N_SUCCESS;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_server_finished.c b/contrib/restricted/aws/s2n/tls/s2n_server_finished.c
index 156641dd14..d7e57a222b 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_server_finished.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_server_finished.c
@@ -37,7 +37,7 @@ int s2n_server_finished_recv(struct s2n_connection *conn)
}
uint8_t *their_version = s2n_stuffer_raw_read(&conn->handshake.io, length);
- notnull_check(their_version);
+ POSIX_ENSURE_REF(their_version);
S2N_ERROR_IF(!s2n_constant_time_equals(our_version, their_version, length), S2N_ERR_BAD_MESSAGE);
@@ -50,7 +50,7 @@ int s2n_server_finished_send(struct s2n_connection *conn)
int length = S2N_TLS_FINISHED_LEN;
/* Compute the finished message */
- GUARD(s2n_prf_server_finished(conn));
+ POSIX_GUARD(s2n_prf_server_finished(conn));
our_version = conn->handshake.server_finished;
@@ -58,17 +58,17 @@ int s2n_server_finished_send(struct s2n_connection *conn)
length = S2N_SSL_FINISHED_LEN;
}
- GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, our_version, length));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, our_version, length));
/* Zero the sequence number */
struct s2n_blob seq = {.data = conn->secure.server_sequence_number,.size = S2N_TLS_SEQUENCE_NUM_LEN };
- GUARD(s2n_blob_zero(&seq));
+ POSIX_GUARD(s2n_blob_zero(&seq));
/* Update the secure state to active, and point the server at the active state */
conn->server = &conn->secure;
- if (IS_RESUMPTION_HANDSHAKE(conn->handshake.handshake_type)) {
- GUARD(s2n_prf_key_expansion(conn));
+ if (s2n_connection_is_session_resumed(conn)) {
+ POSIX_GUARD(s2n_prf_key_expansion(conn));
}
return 0;
@@ -76,7 +76,7 @@ int s2n_server_finished_send(struct s2n_connection *conn)
int s2n_tls13_server_finished_recv(struct s2n_connection *conn) {
- eq_check(conn->actual_protocol_version, S2N_TLS13);
+ POSIX_ENSURE_EQ(conn->actual_protocol_version, S2N_TLS13);
uint8_t length = s2n_stuffer_data_available(&conn->handshake.io);
S2N_ERROR_IF(length == 0, S2N_ERR_BAD_MESSAGE);
@@ -89,43 +89,45 @@ int s2n_tls13_server_finished_recv(struct s2n_connection *conn) {
s2n_tls13_connection_keys(keys, conn);
/* get transcript hash */
- struct s2n_hash_state hash_state = {0};
- GUARD(s2n_handshake_get_hash_state(conn, keys.hash_algorithm, &hash_state));
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+ struct s2n_hash_state *hash_state = &conn->handshake.hashes->hash_workspace;
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, keys.hash_algorithm, hash_state));
/* look up finished secret key */
struct s2n_blob finished_key = {0};
- GUARD(s2n_blob_init(&finished_key, conn->handshake.server_finished, keys.size));
+ POSIX_GUARD(s2n_blob_init(&finished_key, conn->handshake.server_finished, keys.size));
/* generate the hashed message authentication code */
s2n_tls13_key_blob(server_finished_mac, keys.size);
- GUARD(s2n_tls13_calculate_finished_mac(&keys, &finished_key, &hash_state, &server_finished_mac));
+ POSIX_GUARD(s2n_tls13_calculate_finished_mac(&keys, &finished_key, hash_state, &server_finished_mac));
/* compare mac with received message */
- GUARD(s2n_tls13_mac_verify(&keys, &server_finished_mac, &wire_finished_mac));
+ POSIX_GUARD(s2n_tls13_mac_verify(&keys, &server_finished_mac, &wire_finished_mac));
return 0;
}
int s2n_tls13_server_finished_send(struct s2n_connection *conn) {
- eq_check(conn->actual_protocol_version, S2N_TLS13);
+ POSIX_ENSURE_EQ(conn->actual_protocol_version, S2N_TLS13);
/* get tls13 keys */
s2n_tls13_connection_keys(keys, conn);
/* get transcript hash */
- struct s2n_hash_state hash_state = {0};
- GUARD(s2n_handshake_get_hash_state(conn, keys.hash_algorithm, &hash_state));
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+ struct s2n_hash_state *hash_state = &conn->handshake.hashes->hash_workspace;
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, keys.hash_algorithm, hash_state));
/* look up finished secret key */
struct s2n_blob finished_key = {0};
- GUARD(s2n_blob_init(&finished_key, conn->handshake.server_finished, keys.size));
+ POSIX_GUARD(s2n_blob_init(&finished_key, conn->handshake.server_finished, keys.size));
/* generate the hashed message authenticated code */
s2n_tls13_key_blob(server_finished_mac, keys.size);
- GUARD(s2n_tls13_calculate_finished_mac(&keys, &finished_key, &hash_state, &server_finished_mac));
+ POSIX_GUARD(s2n_tls13_calculate_finished_mac(&keys, &finished_key, hash_state, &server_finished_mac));
/* write to handshake io */
- GUARD(s2n_stuffer_write(&conn->handshake.io, &server_finished_mac));
+ POSIX_GUARD(s2n_stuffer_write(&conn->handshake.io, &server_finished_mac));
return 0;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_server_hello.c b/contrib/restricted/aws/s2n/tls/s2n_server_hello.c
index 010c06547a..632f745478 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_server_hello.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_server_hello.c
@@ -15,7 +15,7 @@
#include <sys/param.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include <time.h>
#include "crypto/s2n_fips.h"
@@ -31,6 +31,7 @@
#include "tls/s2n_tls13.h"
#include "tls/s2n_security_policies.h"
#include "tls/s2n_tls13_handshake.h"
+#include "tls/s2n_tls13_key_schedule.h"
#include "stuffer/s2n_stuffer.h"
@@ -51,26 +52,26 @@ const uint8_t tls11_downgrade_protection_bytes[] = {
};
static int s2n_hello_retry_validate(struct s2n_connection *conn) {
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
- ENSURE_POSIX(memcmp(hello_retry_req_random, conn->secure.server_random, S2N_TLS_RANDOM_DATA_LEN) == 0,
+ POSIX_ENSURE(memcmp(hello_retry_req_random, conn->handshake_params.server_random, S2N_TLS_RANDOM_DATA_LEN) == 0,
S2N_ERR_INVALID_HELLO_RETRY);
return S2N_SUCCESS;
}
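For reference, the only thing that distinguishes a HelloRetryRequest from a normal ServerHello on the wire is a fixed 32-byte value in the random field (RFC 8446 section 4.1.3). A minimal standalone sketch of that classification, assuming the sentinel value is supplied elsewhere (as hello_retry_req_random is in this file):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define RANDOM_LEN 32

static bool is_hello_retry_request(const uint8_t server_random[RANDOM_LEN],
                                   const uint8_t hrr_sentinel[RANDOM_LEN])
{
    /* Plain memcmp is fine here: the sentinel is a public constant. */
    return memcmp(server_random, hrr_sentinel, RANDOM_LEN) == 0;
}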
static int s2n_client_detect_downgrade_mechanism(struct s2n_connection *conn) {
- notnull_check(conn);
- uint8_t *downgrade_bytes = &conn->secure.server_random[S2N_TLS_RANDOM_DATA_LEN - S2N_DOWNGRADE_PROTECTION_SIZE];
+ POSIX_ENSURE_REF(conn);
+ uint8_t *downgrade_bytes = &conn->handshake_params.server_random[S2N_TLS_RANDOM_DATA_LEN - S2N_DOWNGRADE_PROTECTION_SIZE];
/* Detect downgrade attacks according to RFC 8446 section 4.1.3 */
if (conn->client_protocol_version == S2N_TLS13 && conn->server_protocol_version == S2N_TLS12) {
if (s2n_constant_time_equals(downgrade_bytes, tls12_downgrade_protection_bytes, S2N_DOWNGRADE_PROTECTION_SIZE)) {
- S2N_ERROR(S2N_ERR_PROTOCOL_DOWNGRADE_DETECTED);
+ POSIX_BAIL(S2N_ERR_PROTOCOL_DOWNGRADE_DETECTED);
}
} else if (conn->client_protocol_version == S2N_TLS13 && conn->server_protocol_version <= S2N_TLS11) {
if (s2n_constant_time_equals(downgrade_bytes, tls11_downgrade_protection_bytes, S2N_DOWNGRADE_PROTECTION_SIZE)) {
- S2N_ERROR(S2N_ERR_PROTOCOL_DOWNGRADE_DETECTED);
+ POSIX_BAIL(S2N_ERR_PROTOCOL_DOWNGRADE_DETECTED);
}
}
@@ -78,16 +79,16 @@ static int s2n_client_detect_downgrade_mechanism(struct s2n_connection *conn) {
}
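The check above relies on the downgrade sentinels from RFC 8446 section 4.1.3: the last eight bytes of ServerHello.random carry an ASCII "DOWNGRD" marker plus a version byte. A self-contained sketch of the comparison, using a simple constant-time equality rather than s2n's helper:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool ct_equals(const uint8_t *a, const uint8_t *b, size_t len)
{
    uint8_t diff = 0;
    for (size_t i = 0; i < len; i++) {
        diff |= a[i] ^ b[i];   /* accumulate differences without branching */
    }
    return diff == 0;
}

static bool downgrade_to_tls12_detected(const uint8_t server_random[32])
{
    /* "DOWNGRD" followed by 0x01 marks a TLS 1.2 downgrade (0x00 marks TLS 1.1 or below). */
    static const uint8_t tls12_sentinel[8] = { 0x44, 0x4F, 0x57, 0x4E, 0x47, 0x52, 0x44, 0x01 };
    return ct_equals(server_random + 32 - 8, tls12_sentinel, sizeof(tls12_sentinel));
}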
static int s2n_server_add_downgrade_mechanism(struct s2n_connection *conn) {
- notnull_check(conn);
- uint8_t *downgrade_bytes = &conn->secure.server_random[S2N_TLS_RANDOM_DATA_LEN - S2N_DOWNGRADE_PROTECTION_SIZE];
+ POSIX_ENSURE_REF(conn);
+ uint8_t *downgrade_bytes = &conn->handshake_params.server_random[S2N_TLS_RANDOM_DATA_LEN - S2N_DOWNGRADE_PROTECTION_SIZE];
/* Protect against downgrade attacks according to RFC 8446 section 4.1.3 */
if (conn->server_protocol_version >= S2N_TLS13 && conn->actual_protocol_version == S2N_TLS12) {
/* TLS1.3 servers MUST use a special random value when negotiating TLS1.2 */
- memcpy_check(downgrade_bytes, tls12_downgrade_protection_bytes, S2N_DOWNGRADE_PROTECTION_SIZE);
+ POSIX_CHECKED_MEMCPY(downgrade_bytes, tls12_downgrade_protection_bytes, S2N_DOWNGRADE_PROTECTION_SIZE);
} else if (conn->server_protocol_version >= S2N_TLS13 && conn->actual_protocol_version <= S2N_TLS11) {
        /* TLS1.3 servers MUST use a special random value when negotiating TLS1.1 or below */
- memcpy_check(downgrade_bytes, tls11_downgrade_protection_bytes, S2N_DOWNGRADE_PROTECTION_SIZE);
+ POSIX_CHECKED_MEMCPY(downgrade_bytes, tls11_downgrade_protection_bytes, S2N_DOWNGRADE_PROTECTION_SIZE);
}
return 0;
@@ -95,7 +96,7 @@ static int s2n_server_add_downgrade_mechanism(struct s2n_connection *conn) {
static int s2n_server_hello_parse(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
struct s2n_stuffer *in = &conn->handshake.io;
uint8_t compression_method;
@@ -103,50 +104,70 @@ static int s2n_server_hello_parse(struct s2n_connection *conn)
uint8_t protocol_version[S2N_TLS_PROTOCOL_VERSION_LEN];
uint8_t session_id[S2N_TLS_SESSION_ID_MAX_LEN];
- GUARD(s2n_stuffer_read_bytes(in, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
- GUARD(s2n_stuffer_read_bytes(in, conn->secure.server_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, conn->handshake_params.server_random, S2N_TLS_RANDOM_DATA_LEN));
/* If the client receives a second HelloRetryRequest in the same connection, it MUST send an error. */
if (s2n_hello_retry_validate(conn) == S2N_SUCCESS) {
- ENSURE_POSIX(!s2n_is_hello_retry_handshake(conn), S2N_ERR_INVALID_HELLO_RETRY);
- GUARD(s2n_set_hello_retry_required(conn));
+ POSIX_ENSURE(!s2n_is_hello_retry_handshake(conn), S2N_ERR_INVALID_HELLO_RETRY);
+ POSIX_GUARD(s2n_set_hello_retry_required(conn));
}
- GUARD(s2n_stuffer_read_uint8(in, &session_id_len));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &session_id_len));
S2N_ERROR_IF(session_id_len > S2N_TLS_SESSION_ID_MAX_LEN, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_stuffer_read_bytes(in, session_id, session_id_len));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, session_id, session_id_len));
uint8_t *cipher_suite_wire = s2n_stuffer_raw_read(in, S2N_TLS_CIPHER_SUITE_LEN);
- notnull_check(cipher_suite_wire);
+ POSIX_ENSURE_REF(cipher_suite_wire);
- GUARD(s2n_stuffer_read_uint8(in, &compression_method));
+ POSIX_GUARD(s2n_stuffer_read_uint8(in, &compression_method));
S2N_ERROR_IF(compression_method != S2N_TLS_COMPRESSION_METHOD_NULL, S2N_ERR_BAD_MESSAGE);
- GUARD(s2n_server_extensions_recv(conn, in));
+ bool session_ids_match = session_id_len != 0 && session_id_len == conn->session_id_len
+ && memcmp(session_id, conn->session_id, session_id_len) == 0;
+ if (!session_ids_match) {
+ conn->ems_negotiated = false;
+ }
+
+ POSIX_GUARD(s2n_server_extensions_recv(conn, in));
if (conn->server_protocol_version >= S2N_TLS13) {
- S2N_ERROR_IF(session_id_len != conn->session_id_len || memcmp(session_id, conn->session_id, session_id_len), S2N_ERR_BAD_MESSAGE);
+ POSIX_ENSURE(session_ids_match || (session_id_len == 0 && conn->session_id_len == 0), S2N_ERR_BAD_MESSAGE);
conn->actual_protocol_version = conn->server_protocol_version;
- GUARD(s2n_set_cipher_as_client(conn, cipher_suite_wire));
+ POSIX_GUARD(s2n_set_cipher_as_client(conn, cipher_suite_wire));
} else {
conn->server_protocol_version = (uint8_t)(protocol_version[0] * 10) + protocol_version[1];
- S2N_ERROR_IF(s2n_client_detect_downgrade_mechanism(conn), S2N_ERR_PROTOCOL_DOWNGRADE_DETECTED);
- ENSURE_POSIX(!conn->config->quic_enabled, S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
+ POSIX_ENSURE(!s2n_client_detect_downgrade_mechanism(conn), S2N_ERR_PROTOCOL_DOWNGRADE_DETECTED);
+ POSIX_ENSURE(!s2n_connection_is_quic_enabled(conn), S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
+
+ /*
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-D.3
+ *# A client that attempts to send 0-RTT data MUST fail a connection if
+ *# it receives a ServerHello with TLS 1.2 or older.
+ */
+ POSIX_ENSURE(conn->early_data_state != S2N_EARLY_DATA_REQUESTED, S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
const struct s2n_security_policy *security_policy;
- GUARD(s2n_connection_get_security_policy(conn, &security_policy));
+ POSIX_GUARD(s2n_connection_get_security_policy(conn, &security_policy));
if (conn->server_protocol_version < security_policy->minimum_protocol_version
|| conn->server_protocol_version > conn->client_protocol_version) {
- GUARD(s2n_queue_reader_unsupported_protocol_version_alert(conn));
- S2N_ERROR(S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
+ POSIX_GUARD(s2n_queue_reader_unsupported_protocol_version_alert(conn));
+ POSIX_BAIL(S2N_ERR_PROTOCOL_VERSION_UNSUPPORTED);
}
uint8_t actual_protocol_version = MIN(conn->server_protocol_version, conn->client_protocol_version);
- /* Use the session state if server sent same session id as client sent in client hello */
- if (session_id_len != 0 && session_id_len == conn->session_id_len
- && !memcmp(session_id, conn->session_id, session_id_len)) {
+
+ /*
+ *= https://tools.ietf.org/rfc/rfc5077#section-3.4
+ *# If the server accepts the ticket
+ *# and the Session ID is not empty, then it MUST respond with the same
+ *# Session ID present in the ClientHello. This allows the client to
+ *# easily differentiate when the server is resuming a session from when
+ *# it is falling back to a full handshake.
+ */
+ if (session_ids_match) {
/* check if the resumed session state is valid */
S2N_ERROR_IF(conn->actual_protocol_version != actual_protocol_version, S2N_ERR_BAD_MESSAGE);
S2N_ERROR_IF(memcmp(conn->secure.cipher_suite->iana_value, cipher_suite_wire, S2N_TLS_CIPHER_SUITE_LEN) != 0, S2N_ERR_BAD_MESSAGE);
@@ -155,54 +176,64 @@ static int s2n_server_hello_parse(struct s2n_connection *conn)
conn->client_session_resumed = 1;
} else {
conn->session_id_len = session_id_len;
- memcpy_check(conn->session_id, session_id, session_id_len);
+ POSIX_CHECKED_MEMCPY(conn->session_id, session_id, session_id_len);
conn->actual_protocol_version = actual_protocol_version;
- GUARD(s2n_set_cipher_as_client(conn, cipher_suite_wire));
+ POSIX_GUARD(s2n_set_cipher_as_client(conn, cipher_suite_wire));
/* Erase master secret which might have been set for session resumption */
- memset_check((uint8_t *)conn->secure.master_secret, 0, S2N_TLS_SECRET_LEN);
+ POSIX_CHECKED_MEMSET((uint8_t *)conn->secrets.tls12.master_secret, 0, S2N_TLS_SECRET_LEN);
/* Erase client session ticket which might have been set for session resumption */
- GUARD(s2n_free(&conn->client_ticket));
+ POSIX_GUARD(s2n_free(&conn->client_ticket));
}
}
+ /* If it is not possible to accept early data on this connection
+ * (for example, because no PSK was negotiated) we need to reject early data now.
+ * Otherwise, early data logic may make certain invalid assumptions about the
+ * state of the connection (for example, that the prf is the early data prf).
+ */
+ POSIX_GUARD_RESULT(s2n_early_data_accept_or_reject(conn));
+ if (conn->early_data_state == S2N_EARLY_DATA_REJECTED) {
+ POSIX_GUARD_RESULT(s2n_tls13_key_schedule_reset(conn));
+ }
+
return 0;
}
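The session_ids_match computation introduced above encodes the RFC 5077 section 3.4 rule: TLS 1.2 resumption is only assumed when the server echoes a non-empty session ID identical to the one the client offered. A standalone sketch with hypothetical parameter names:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool session_ids_match(const uint8_t *client_id, uint8_t client_id_len,
                              const uint8_t *server_id, uint8_t server_id_len)
{
    return server_id_len != 0
           && server_id_len == client_id_len
           && memcmp(client_id, server_id, server_id_len) == 0;
}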
int s2n_server_hello_recv(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
/* Read the message off the wire */
- GUARD(s2n_server_hello_parse(conn));
+ POSIX_GUARD(s2n_server_hello_parse(conn));
conn->actual_protocol_version_established = 1;
- GUARD(s2n_conn_set_handshake_type(conn));
+ POSIX_GUARD(s2n_conn_set_handshake_type(conn));
/* If this is a HelloRetryRequest, we don't process the ServerHello.
* Instead we proceed with retry logic. */
if (s2n_is_hello_retry_message(conn)) {
- GUARD(s2n_server_hello_retry_recv(conn));
+ POSIX_GUARD(s2n_server_hello_retry_recv(conn));
return 0;
}
- if (IS_RESUMPTION_HANDSHAKE(conn->handshake.handshake_type)) {
- GUARD(s2n_prf_key_expansion(conn));
+ if (conn->actual_protocol_version < S2N_TLS13 && s2n_connection_is_session_resumed(conn)) {
+ POSIX_GUARD(s2n_prf_key_expansion(conn));
}
/* Choose a default signature scheme */
- GUARD(s2n_choose_default_sig_scheme(conn, &conn->secure.conn_sig_scheme));
+ POSIX_GUARD(s2n_choose_default_sig_scheme(conn, &conn->handshake_params.conn_sig_scheme, S2N_SERVER));
/* Update the required hashes for this connection */
- GUARD(s2n_conn_update_required_handshake_hashes(conn));
+ POSIX_GUARD(s2n_conn_update_required_handshake_hashes(conn));
return 0;
}
int s2n_server_hello_write_message(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
/* The actual_protocol_version is set while processing the CLIENT_HELLO message, so
* it could be S2N_TLS13. SERVER_HELLO should always respond with the legacy version.
@@ -212,38 +243,38 @@ int s2n_server_hello_write_message(struct s2n_connection *conn)
protocol_version[0] = (uint8_t)(legacy_protocol_version / 10);
protocol_version[1] = (uint8_t)(legacy_protocol_version % 10);
- GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
- GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, conn->secure.server_random, S2N_TLS_RANDOM_DATA_LEN));
- GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, conn->session_id_len));
- GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, conn->session_id, conn->session_id_len));
- GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, conn->secure.cipher_suite->iana_value, S2N_TLS_CIPHER_SUITE_LEN));
- GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, S2N_TLS_COMPRESSION_METHOD_NULL));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, protocol_version, S2N_TLS_PROTOCOL_VERSION_LEN));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, conn->handshake_params.server_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, conn->session_id_len));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, conn->session_id, conn->session_id_len));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&conn->handshake.io, conn->secure.cipher_suite->iana_value, S2N_TLS_CIPHER_SUITE_LEN));
+ POSIX_GUARD(s2n_stuffer_write_uint8(&conn->handshake.io, S2N_TLS_COMPRESSION_METHOD_NULL));
return 0;
}
int s2n_server_hello_send(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
struct s2n_stuffer server_random = {0};
struct s2n_blob b = {0};
- GUARD(s2n_blob_init(&b, conn->secure.server_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_blob_init(&b, conn->handshake_params.server_random, S2N_TLS_RANDOM_DATA_LEN));
/* Create the server random data */
- GUARD(s2n_stuffer_init(&server_random, &b));
+ POSIX_GUARD(s2n_stuffer_init(&server_random, &b));
struct s2n_blob rand_data = {0};
- GUARD(s2n_blob_init(&rand_data, s2n_stuffer_raw_write(&server_random, S2N_TLS_RANDOM_DATA_LEN), S2N_TLS_RANDOM_DATA_LEN));
- notnull_check(rand_data.data);
- GUARD_AS_POSIX(s2n_get_public_random_data(&rand_data));
+ POSIX_GUARD(s2n_blob_init(&rand_data, s2n_stuffer_raw_write(&server_random, S2N_TLS_RANDOM_DATA_LEN), S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_ENSURE_REF(rand_data.data);
+ POSIX_GUARD_RESULT(s2n_get_public_random_data(&rand_data));
/* Add a downgrade detection mechanism if required */
- GUARD(s2n_server_add_downgrade_mechanism(conn));
+ POSIX_GUARD(s2n_server_add_downgrade_mechanism(conn));
- GUARD(s2n_server_hello_write_message(conn));
+ POSIX_GUARD(s2n_server_hello_write_message(conn));
- GUARD(s2n_server_extensions_send(conn, &conn->handshake.io));
+ POSIX_GUARD(s2n_server_extensions_send(conn, &conn->handshake.io));
conn->actual_protocol_version_established = 1;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_server_hello_retry.c b/contrib/restricted/aws/s2n/tls/s2n_server_hello_retry.c
index a6dda9edf8..c305289100 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_server_hello_retry.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_server_hello_retry.c
@@ -35,50 +35,50 @@ uint8_t hello_retry_req_random[S2N_TLS_RANDOM_DATA_LEN] = {
static int s2n_conn_reset_retry_values(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
/* Reset handshake values */
conn->handshake.client_hello_received = 0;
/* Reset client hello state */
- GUARD(s2n_stuffer_wipe(&conn->client_hello.raw_message));
- GUARD(s2n_stuffer_resize(&conn->client_hello.raw_message, 0));
- GUARD(s2n_client_hello_free(&conn->client_hello));
- GUARD(s2n_stuffer_growable_alloc(&conn->client_hello.raw_message, 0));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->client_hello.raw_message));
+ POSIX_GUARD(s2n_stuffer_resize(&conn->client_hello.raw_message, 0));
+ POSIX_GUARD(s2n_client_hello_free(&conn->client_hello));
+ POSIX_GUARD(s2n_stuffer_growable_alloc(&conn->client_hello.raw_message, 0));
return 0;
}
int s2n_server_hello_retry_send(struct s2n_connection *conn)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
- memcpy_check(conn->secure.server_random, hello_retry_req_random, S2N_TLS_RANDOM_DATA_LEN);
+ POSIX_CHECKED_MEMCPY(conn->handshake_params.server_random, hello_retry_req_random, S2N_TLS_RANDOM_DATA_LEN);
- GUARD(s2n_server_hello_write_message(conn));
+ POSIX_GUARD(s2n_server_hello_write_message(conn));
/* Write the extensions */
- GUARD(s2n_server_extensions_send(conn, &conn->handshake.io));
+ POSIX_GUARD(s2n_server_extensions_send(conn, &conn->handshake.io));
/* Update transcript */
- GUARD(s2n_server_hello_retry_recreate_transcript(conn));
- GUARD(s2n_conn_reset_retry_values(conn));
+ POSIX_GUARD(s2n_server_hello_retry_recreate_transcript(conn));
+ POSIX_GUARD(s2n_conn_reset_retry_values(conn));
return 0;
}
int s2n_server_hello_retry_recv(struct s2n_connection *conn)
{
- notnull_check(conn);
- ENSURE_POSIX(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_INVALID_HELLO_RETRY);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_INVALID_HELLO_RETRY);
const struct s2n_ecc_preferences *ecc_pref = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
- notnull_check(ecc_pref);
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_pref));
+ POSIX_ENSURE_REF(ecc_pref);
const struct s2n_kem_preferences *kem_pref = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
- notnull_check(kem_pref);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_pref));
+ POSIX_ENSURE_REF(kem_pref);
/* Upon receipt of the HelloRetryRequest, the client MUST verify that:
* (1) the selected_group field corresponds to a group
@@ -88,44 +88,34 @@ int s2n_server_hello_retry_recv(struct s2n_connection *conn)
* in the "key_share" extension in the original ClientHello.
* If either of these checks fails, then the client MUST abort the handshake. */
- bool match_found = false;
-
- const struct s2n_ecc_named_curve *named_curve = conn->secure.server_ecc_evp_params.negotiated_curve;
- const struct s2n_kem_group *kem_group = conn->secure.server_kem_group_params.kem_group;
+ const struct s2n_ecc_named_curve *named_curve = conn->kex_params.server_ecc_evp_params.negotiated_curve;
+ const struct s2n_kem_group *kem_group = conn->kex_params.server_kem_group_params.kem_group;
/* Boolean XOR check: exactly one of {named_curve, kem_group} should be non-null. */
- ENSURE_POSIX( (named_curve != NULL) != (kem_group != NULL), S2N_ERR_INVALID_HELLO_RETRY);
+ POSIX_ENSURE( (named_curve != NULL) != (kem_group != NULL), S2N_ERR_INVALID_HELLO_RETRY);
+ bool new_key_share_requested = false;
if (named_curve != NULL) {
- for (size_t i = 0; i < ecc_pref->count; i++) {
- if (ecc_pref->ecc_curves[i] == named_curve) {
- match_found = true;
- ENSURE_POSIX(conn->secure.client_ecc_evp_params[i].evp_pkey == NULL, S2N_ERR_INVALID_HELLO_RETRY);
- break;
- }
- }
+ new_key_share_requested = (named_curve != conn->kex_params.client_ecc_evp_params.negotiated_curve);
}
-
if (kem_group != NULL) {
/* If PQ is disabled, the client should not have sent any PQ IDs
* in the supported_groups list of the initial ClientHello */
- ENSURE_POSIX(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
-
- for (size_t i = 0; i < kem_pref->tls13_kem_group_count; i++) {
- if (kem_pref->tls13_kem_groups[i] == kem_group) {
- match_found = true;
- ENSURE_POSIX(conn->secure.client_kem_group_params[i].kem_params.private_key.data == NULL,
- S2N_ERR_INVALID_HELLO_RETRY);
- ENSURE_POSIX(conn->secure.client_kem_group_params[i].ecc_params.evp_pkey == NULL,
- S2N_ERR_INVALID_HELLO_RETRY);
- }
- }
+ POSIX_ENSURE(s2n_pq_is_enabled(), S2N_ERR_PQ_DISABLED);
+ new_key_share_requested = (kem_group != conn->kex_params.client_kem_group_params.kem_group);
}
- ENSURE_POSIX(match_found, S2N_ERR_INVALID_HELLO_RETRY);
+ /*
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.1.4
+ *# Clients MUST abort the handshake with an
+ *# "illegal_parameter" alert if the HelloRetryRequest would not result
+ *# in any change in the ClientHello.
+ */
+ POSIX_ENSURE((conn->early_data_state == S2N_EARLY_DATA_REJECTED) || new_key_share_requested,
+ S2N_ERR_INVALID_HELLO_RETRY);
/* Update transcript hash */
- GUARD(s2n_server_hello_retry_recreate_transcript(conn));
+ POSIX_GUARD(s2n_server_hello_retry_recreate_transcript(conn));
return S2N_SUCCESS;
}
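The new_key_share_requested check above implements the RFC 8446 section 4.1.4 requirement that a HelloRetryRequest must actually change the ClientHello. A simplified sketch, using pointer identity for "same group" as the code above does; the types are stand-ins, not s2n's:

#include <stdbool.h>

struct named_group;   /* opaque stand-in for an ECC curve or KEM group */

static bool hello_retry_is_legal(const struct named_group *requested,
                                 const struct named_group *already_offered,
                                 bool early_data_rejected)
{
    bool new_key_share_requested = (requested != already_offered);
    /* The retry is only legal if it changes something: either it asks for a key
     * share the client did not send, or it caused early data to be rejected. */
    return early_data_rejected || new_key_share_requested;
}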
diff --git a/contrib/restricted/aws/s2n/tls/s2n_server_key_exchange.c b/contrib/restricted/aws/s2n/tls/s2n_server_key_exchange.c
index 9116868110..b10b300e57 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_server_key_exchange.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_server_key_exchange.c
@@ -13,12 +13,11 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "error/s2n_errno.h"
#include "tls/s2n_async_pkey.h"
-#include "tls/s2n_tls_digest_preferences.h"
#include "tls/s2n_kem.h"
#include "tls/s2n_kex.h"
#include "tls/s2n_cipher_suites.h"
@@ -38,50 +37,50 @@ static int s2n_server_key_send_write_signature(struct s2n_connection *conn, stru
int s2n_server_key_recv(struct s2n_connection *conn)
{
- notnull_check(conn);
- notnull_check(conn->secure.cipher_suite);
- notnull_check(conn->secure.cipher_suite->key_exchange_alg);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite->key_exchange_alg);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
- struct s2n_hash_state *signature_hash = &conn->secure.signature_hash;
+ struct s2n_hash_state *signature_hash = &conn->handshake.hashes->hash_workspace;
const struct s2n_kex *key_exchange = conn->secure.cipher_suite->key_exchange_alg;
struct s2n_stuffer *in = &conn->handshake.io;
struct s2n_blob data_to_verify = {0};
/* Read the KEX data */
struct s2n_kex_raw_server_data kex_data = {0};
- GUARD_AS_POSIX(s2n_kex_server_key_recv_read_data(key_exchange, conn, &data_to_verify, &kex_data));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_recv_read_data(key_exchange, conn, &data_to_verify, &kex_data));
/* Add common signature data */
- struct s2n_signature_scheme active_sig_scheme;
+ struct s2n_signature_scheme *active_sig_scheme = &conn->handshake_params.conn_sig_scheme;
if (conn->actual_protocol_version == S2N_TLS12) {
/* Verify the SigScheme picked by the Server was in the preference list we sent (or is the default SigScheme) */
- GUARD(s2n_get_and_validate_negotiated_signature_scheme(conn, in, &active_sig_scheme));
- } else {
- active_sig_scheme = conn->secure.conn_sig_scheme;
+ POSIX_GUARD(s2n_get_and_validate_negotiated_signature_scheme(conn, in, active_sig_scheme));
}
- GUARD(s2n_hash_init(signature_hash, active_sig_scheme.hash_alg));
- GUARD(s2n_hash_update(signature_hash, conn->secure.client_random, S2N_TLS_RANDOM_DATA_LEN));
- GUARD(s2n_hash_update(signature_hash, conn->secure.server_random, S2N_TLS_RANDOM_DATA_LEN));
+
+ POSIX_GUARD(s2n_hash_init(signature_hash, active_sig_scheme->hash_alg));
+ POSIX_GUARD(s2n_hash_update(signature_hash, conn->handshake_params.client_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_hash_update(signature_hash, conn->handshake_params.server_random, S2N_TLS_RANDOM_DATA_LEN));
/* Add KEX specific data */
- GUARD(s2n_hash_update(signature_hash, data_to_verify.data, data_to_verify.size));
+ POSIX_GUARD(s2n_hash_update(signature_hash, data_to_verify.data, data_to_verify.size));
/* Verify the signature */
uint16_t signature_length;
- GUARD(s2n_stuffer_read_uint16(in, &signature_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &signature_length));
struct s2n_blob signature = {.size = signature_length, .data = s2n_stuffer_raw_read(in, signature_length)};
- notnull_check(signature.data);
- gt_check(signature_length, 0);
+ POSIX_ENSURE_REF(signature.data);
+ POSIX_ENSURE_GT(signature_length, 0);
- S2N_ERROR_IF(s2n_pkey_verify(&conn->secure.server_public_key, active_sig_scheme.sig_alg,signature_hash, &signature) < 0,
+ S2N_ERROR_IF(s2n_pkey_verify(&conn->handshake_params.server_public_key, active_sig_scheme->sig_alg, signature_hash, &signature) < 0,
S2N_ERR_BAD_MESSAGE);
/* We don't need the key any more, so free it */
- GUARD(s2n_pkey_free(&conn->secure.server_public_key));
+ POSIX_GUARD(s2n_pkey_free(&conn->handshake_params.server_public_key));
/* Parse the KEX data into whatever form needed and save it to the connection object */
- GUARD_AS_POSIX(s2n_kex_server_key_recv_parse_data(key_exchange, conn, &kex_data));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_recv_parse_data(key_exchange, conn, &kex_data));
return 0;
}
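For the TLS 1.2 path above, the ServerKeyExchange signature covers client_random, server_random, and the raw key-exchange parameters, in that order (RFC 5246 section 7.4.3). A small sketch assembling that to-be-signed buffer; buffer management is simplified:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define RANDOM_LEN 32

static size_t build_ske_signed_data(const uint8_t client_random[RANDOM_LEN],
                                    const uint8_t server_random[RANDOM_LEN],
                                    const uint8_t *kex_params, size_t kex_len,
                                    uint8_t *out, size_t out_cap)
{
    size_t total = 2 * RANDOM_LEN + kex_len;
    if (out_cap < total) {
        return 0;   /* caller's buffer is too small */
    }
    memcpy(out, client_random, RANDOM_LEN);
    memcpy(out + RANDOM_LEN, server_random, RANDOM_LEN);
    memcpy(out + 2 * RANDOM_LEN, kex_params, kex_len);
    return total;
}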
@@ -89,13 +88,13 @@ int s2n_ecdhe_server_key_recv_read_data(struct s2n_connection *conn, struct s2n_
{
struct s2n_stuffer *in = &conn->handshake.io;
- GUARD(s2n_ecc_evp_read_params(in, data_to_verify, &raw_server_data->ecdhe_data));
+ POSIX_GUARD(s2n_ecc_evp_read_params(in, data_to_verify, &raw_server_data->ecdhe_data));
return 0;
}
int s2n_ecdhe_server_key_recv_parse_data(struct s2n_connection *conn, struct s2n_kex_raw_server_data *raw_server_data)
{
- GUARD(s2n_ecc_evp_parse_params(&raw_server_data->ecdhe_data, &conn->secure.server_ecc_evp_params));
+ POSIX_GUARD(s2n_ecc_evp_parse_params(&raw_server_data->ecdhe_data, &conn->kex_params.server_ecc_evp_params));
return 0;
}
@@ -111,23 +110,23 @@ int s2n_dhe_server_key_recv_read_data(struct s2n_connection *conn, struct s2n_bl
/* Keep a copy to the start of the whole structure for the signature check */
data_to_verify->data = s2n_stuffer_raw_read(in, 0);
- notnull_check(data_to_verify->data);
+ POSIX_ENSURE_REF(data_to_verify->data);
/* Read each of the three elements in */
- GUARD(s2n_stuffer_read_uint16(in, &p_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &p_length));
dhe_data->p.size = p_length;
dhe_data->p.data = s2n_stuffer_raw_read(in, p_length);
- notnull_check(dhe_data->p.data);
+ POSIX_ENSURE_REF(dhe_data->p.data);
- GUARD(s2n_stuffer_read_uint16(in, &g_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &g_length));
dhe_data->g.size = g_length;
dhe_data->g.data = s2n_stuffer_raw_read(in, g_length);
- notnull_check(dhe_data->g.data);
+ POSIX_ENSURE_REF(dhe_data->g.data);
- GUARD(s2n_stuffer_read_uint16(in, &Ys_length));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &Ys_length));
dhe_data->Ys.size = Ys_length;
dhe_data->Ys.data = s2n_stuffer_raw_read(in, Ys_length);
- notnull_check(dhe_data->Ys.data);
+ POSIX_ENSURE_REF(dhe_data->Ys.data);
/* Now we know the total size of the structure */
data_to_verify->size = 2 + p_length + 2 + g_length + 2 + Ys_length;
@@ -139,7 +138,7 @@ int s2n_dhe_server_key_recv_parse_data(struct s2n_connection *conn, struct s2n_k
struct s2n_dhe_raw_server_points dhe_data = raw_server_data->dhe_data;
/* Copy the DH details */
- GUARD(s2n_dh_p_g_Ys_to_dh_params(&conn->secure.server_dh_params, &dhe_data.p, &dhe_data.g, &dhe_data.Ys));
+ POSIX_GUARD(s2n_dh_p_g_Ys_to_dh_params(&conn->kex_params.server_dh_params, &dhe_data.p, &dhe_data.g, &dhe_data.Ys));
return 0;
}
@@ -150,26 +149,26 @@ int s2n_kem_server_key_recv_read_data(struct s2n_connection *conn, struct s2n_bl
/* Keep a copy to the start of the whole structure for the signature check */
data_to_verify->data = s2n_stuffer_raw_read(in, 0);
- notnull_check(data_to_verify->data);
+ POSIX_ENSURE_REF(data_to_verify->data);
/* the server sends the KEM ID */
kem_data->kem_name.data = s2n_stuffer_raw_read(in, 2);
- notnull_check(kem_data->kem_name.data);
+ POSIX_ENSURE_REF(kem_data->kem_name.data);
kem_data->kem_name.size = 2;
struct s2n_stuffer kem_id_stuffer = { 0 };
uint8_t kem_id_arr[2];
kem_extension_size kem_id;
struct s2n_blob kem_id_blob = { .data = kem_id_arr, .size = s2n_array_len(kem_id_arr) };
- GUARD(s2n_stuffer_init(&kem_id_stuffer, &kem_id_blob));
- GUARD(s2n_stuffer_write(&kem_id_stuffer, &(kem_data->kem_name)));
- GUARD(s2n_stuffer_read_uint16(&kem_id_stuffer, &kem_id));
+ POSIX_GUARD(s2n_stuffer_init(&kem_id_stuffer, &kem_id_blob));
+ POSIX_GUARD(s2n_stuffer_write(&kem_id_stuffer, &(kem_data->kem_name)));
+ POSIX_GUARD(s2n_stuffer_read_uint16(&kem_id_stuffer, &kem_id));
- GUARD(s2n_get_kem_from_extension_id(kem_id, &(conn->secure.kem_params.kem)));
- GUARD(s2n_kem_recv_public_key(in, &(conn->secure.kem_params)));
+ POSIX_GUARD(s2n_get_kem_from_extension_id(kem_id, &(conn->kex_params.kem_params.kem)));
+ POSIX_GUARD(s2n_kem_recv_public_key(in, &(conn->kex_params.kem_params)));
- kem_data->raw_public_key.data = conn->secure.kem_params.public_key.data;
- kem_data->raw_public_key.size = conn->secure.kem_params.public_key.size;
+ kem_data->raw_public_key.data = conn->kex_params.kem_params.public_key.data;
+ kem_data->raw_public_key.size = conn->kex_params.kem_params.public_key.size;
data_to_verify->size = sizeof(kem_extension_size) + sizeof(kem_public_key_size) + kem_data->raw_public_key.size;
@@ -182,37 +181,37 @@ int s2n_kem_server_key_recv_parse_data(struct s2n_connection *conn, struct s2n_k
/* Check that the server's requested kem is supported by the client */
const struct s2n_kem_preferences *kem_preferences = NULL;
- GUARD(s2n_connection_get_kem_preferences(conn, &kem_preferences));
- notnull_check(kem_preferences);
+ POSIX_GUARD(s2n_connection_get_kem_preferences(conn, &kem_preferences));
+ POSIX_ENSURE_REF(kem_preferences);
const struct s2n_cipher_suite *cipher_suite = conn->secure.cipher_suite;
const struct s2n_kem *match = NULL;
S2N_ERROR_IF(s2n_choose_kem_with_peer_pref_list(cipher_suite->iana_value, &kem_data->kem_name, kem_preferences->kems,
kem_preferences->kem_count, &match) != 0, S2N_ERR_KEM_UNSUPPORTED_PARAMS);
- conn->secure.kem_params.kem = match;
+ conn->kex_params.kem_params.kem = match;
- S2N_ERROR_IF(kem_data->raw_public_key.size != conn->secure.kem_params.kem->public_key_length, S2N_ERR_BAD_MESSAGE);
+ S2N_ERROR_IF(kem_data->raw_public_key.size != conn->kex_params.kem_params.kem->public_key_length, S2N_ERR_BAD_MESSAGE);
return 0;
}
int s2n_hybrid_server_key_recv_read_data(struct s2n_connection *conn, struct s2n_blob *total_data_to_verify, struct s2n_kex_raw_server_data *raw_server_data)
{
- notnull_check(conn);
- notnull_check(conn->secure.cipher_suite);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
const struct s2n_kex *kex = conn->secure.cipher_suite->key_exchange_alg;
const struct s2n_kex *hybrid_kex_0 = kex->hybrid[0];
const struct s2n_kex *hybrid_kex_1 = kex->hybrid[1];
/* Keep a copy to the start of the whole structure for the signature check */
total_data_to_verify->data = s2n_stuffer_raw_read(&conn->handshake.io, 0);
- notnull_check(total_data_to_verify->data);
+ POSIX_ENSURE_REF(total_data_to_verify->data);
struct s2n_blob data_to_verify_0 = {0};
- GUARD_AS_POSIX(s2n_kex_server_key_recv_read_data(hybrid_kex_0, conn, &data_to_verify_0, raw_server_data));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_recv_read_data(hybrid_kex_0, conn, &data_to_verify_0, raw_server_data));
struct s2n_blob data_to_verify_1 = {0};
- GUARD_AS_POSIX(s2n_kex_server_key_recv_read_data(hybrid_kex_1, conn, &data_to_verify_1, raw_server_data));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_recv_read_data(hybrid_kex_1, conn, &data_to_verify_1, raw_server_data));
total_data_to_verify->size = data_to_verify_0.size + data_to_verify_1.size;
return 0;
@@ -220,43 +219,46 @@ int s2n_hybrid_server_key_recv_read_data(struct s2n_connection *conn, struct s2n
int s2n_hybrid_server_key_recv_parse_data(struct s2n_connection *conn, struct s2n_kex_raw_server_data *raw_server_data)
{
- notnull_check(conn);
- notnull_check(conn->secure.cipher_suite);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
const struct s2n_kex *kex = conn->secure.cipher_suite->key_exchange_alg;
const struct s2n_kex *hybrid_kex_0 = kex->hybrid[0];
const struct s2n_kex *hybrid_kex_1 = kex->hybrid[1];
- GUARD_AS_POSIX(s2n_kex_server_key_recv_parse_data(hybrid_kex_0, conn, raw_server_data));
- GUARD_AS_POSIX(s2n_kex_server_key_recv_parse_data(hybrid_kex_1, conn, raw_server_data));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_recv_parse_data(hybrid_kex_0, conn, raw_server_data));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_recv_parse_data(hybrid_kex_1, conn, raw_server_data));
return 0;
}
int s2n_server_key_send(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+
S2N_ASYNC_PKEY_GUARD(conn);
- struct s2n_hash_state *signature_hash = &conn->secure.signature_hash;
+ struct s2n_hash_state *signature_hash = &conn->handshake.hashes->hash_workspace;
const struct s2n_kex *key_exchange = conn->secure.cipher_suite->key_exchange_alg;
struct s2n_stuffer *out = &conn->handshake.io;
struct s2n_blob data_to_sign = {0};
    /* Call the negotiated key exchange method to send its data */
- GUARD_AS_POSIX(s2n_kex_server_key_send(key_exchange, conn, &data_to_sign));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_send(key_exchange, conn, &data_to_sign));
/* Add common signature data */
if (conn->actual_protocol_version == S2N_TLS12) {
- GUARD(s2n_stuffer_write_uint16(out, conn->secure.conn_sig_scheme.iana_value));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, conn->handshake_params.conn_sig_scheme.iana_value));
}
/* Add the random data to the hash */
- GUARD(s2n_hash_init(signature_hash, conn->secure.conn_sig_scheme.hash_alg));
- GUARD(s2n_hash_update(signature_hash, conn->secure.client_random, S2N_TLS_RANDOM_DATA_LEN));
- GUARD(s2n_hash_update(signature_hash, conn->secure.server_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_hash_init(signature_hash, conn->handshake_params.conn_sig_scheme.hash_alg));
+ POSIX_GUARD(s2n_hash_update(signature_hash, conn->handshake_params.client_random, S2N_TLS_RANDOM_DATA_LEN));
+ POSIX_GUARD(s2n_hash_update(signature_hash, conn->handshake_params.server_random, S2N_TLS_RANDOM_DATA_LEN));
/* Add KEX specific data to the hash */
- GUARD(s2n_hash_update(signature_hash, data_to_sign.data, data_to_sign.size));
+ POSIX_GUARD(s2n_hash_update(signature_hash, data_to_sign.data, data_to_sign.size));
- S2N_ASYNC_PKEY_SIGN(conn, conn->secure.conn_sig_scheme.sig_alg, signature_hash, s2n_server_key_send_write_signature);
+ S2N_ASYNC_PKEY_SIGN(conn, conn->handshake_params.conn_sig_scheme.sig_alg, signature_hash, s2n_server_key_send_write_signature);
}
int s2n_ecdhe_server_key_send(struct s2n_connection *conn, struct s2n_blob *data_to_sign)
@@ -264,10 +266,10 @@ int s2n_ecdhe_server_key_send(struct s2n_connection *conn, struct s2n_blob *data
struct s2n_stuffer *out = &conn->handshake.io;
    /* Generate an ephemeral key */
- GUARD(s2n_ecc_evp_generate_ephemeral_key(&conn->secure.server_ecc_evp_params));
+ POSIX_GUARD(s2n_ecc_evp_generate_ephemeral_key(&conn->kex_params.server_ecc_evp_params));
/* Write it out and calculate the data to sign later */
- GUARD(s2n_ecc_evp_write_params(&conn->secure.server_ecc_evp_params, out, data_to_sign));
+ POSIX_GUARD(s2n_ecc_evp_write_params(&conn->kex_params.server_ecc_evp_params, out, data_to_sign));
return 0;
}
@@ -276,26 +278,26 @@ int s2n_dhe_server_key_send(struct s2n_connection *conn, struct s2n_blob *data_t
struct s2n_stuffer *out = &conn->handshake.io;
/* Duplicate the DH key from the config */
- GUARD(s2n_dh_params_copy(conn->config->dhparams, &conn->secure.server_dh_params));
+ POSIX_GUARD(s2n_dh_params_copy(conn->config->dhparams, &conn->kex_params.server_dh_params));
/* Generate an ephemeral key */
- GUARD(s2n_dh_generate_ephemeral_key(&conn->secure.server_dh_params));
+ POSIX_GUARD(s2n_dh_generate_ephemeral_key(&conn->kex_params.server_dh_params));
/* Write it out and calculate the data to sign later */
- GUARD(s2n_dh_params_to_p_g_Ys(&conn->secure.server_dh_params, out, data_to_sign));
+ POSIX_GUARD(s2n_dh_params_to_p_g_Ys(&conn->kex_params.server_dh_params, out, data_to_sign));
return 0;
}
int s2n_kem_server_key_send(struct s2n_connection *conn, struct s2n_blob *data_to_sign)
{
struct s2n_stuffer *out = &conn->handshake.io;
- const struct s2n_kem *kem = conn->secure.kem_params.kem;
+ const struct s2n_kem *kem = conn->kex_params.kem_params.kem;
data_to_sign->data = s2n_stuffer_raw_write(out, 0);
- notnull_check(data_to_sign->data);
+ POSIX_ENSURE_REF(data_to_sign->data);
- GUARD(s2n_stuffer_write_uint16(out, kem->kem_extension_id));
- GUARD(s2n_kem_send_public_key(out, &(conn->secure.kem_params)));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, kem->kem_extension_id));
+ POSIX_GUARD(s2n_kem_send_public_key(out, &(conn->kex_params.kem_params)));
data_to_sign->size = sizeof(kem_extension_size) + sizeof(kem_public_key_size) + kem->public_key_length;
@@ -304,21 +306,21 @@ int s2n_kem_server_key_send(struct s2n_connection *conn, struct s2n_blob *data_t
int s2n_hybrid_server_key_send(struct s2n_connection *conn, struct s2n_blob *total_data_to_sign)
{
- notnull_check(conn);
- notnull_check(conn->secure.cipher_suite);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
const struct s2n_kex *kex = conn->secure.cipher_suite->key_exchange_alg;
const struct s2n_kex *hybrid_kex_0 = kex->hybrid[0];
const struct s2n_kex *hybrid_kex_1 = kex->hybrid[1];
/* Keep a copy to the start of the whole structure for the signature check */
total_data_to_sign->data = s2n_stuffer_raw_write(&conn->handshake.io, 0);
- notnull_check(total_data_to_sign->data);
+ POSIX_ENSURE_REF(total_data_to_sign->data);
struct s2n_blob data_to_verify_0 = {0};
- GUARD_AS_POSIX(s2n_kex_server_key_send(hybrid_kex_0, conn, &data_to_verify_0));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_send(hybrid_kex_0, conn, &data_to_verify_0));
struct s2n_blob data_to_verify_1 = {0};
- GUARD_AS_POSIX(s2n_kex_server_key_send(hybrid_kex_1, conn, &data_to_verify_1));
+ POSIX_GUARD_RESULT(s2n_kex_server_key_send(hybrid_kex_1, conn, &data_to_verify_1));
total_data_to_sign->size = data_to_verify_0.size + data_to_verify_1.size;
return 0;
@@ -328,8 +330,8 @@ int s2n_server_key_send_write_signature(struct s2n_connection *conn, struct s2n_
{
struct s2n_stuffer *out = &conn->handshake.io;
- GUARD(s2n_stuffer_write_uint16(out, signature->size));
- GUARD(s2n_stuffer_write_bytes(out, signature->data, signature->size));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, signature->size));
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, signature->data, signature->size));
return 0;
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_server_new_session_ticket.c b/contrib/restricted/aws/s2n/tls/s2n_server_new_session_ticket.c
index a42c5029d2..a0af0e56ac 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_server_new_session_ticket.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_server_new_session_ticket.c
@@ -15,7 +15,7 @@
#include <sys/param.h>
-#include <s2n.h>
+#include "api/s2n.h"
#include <time.h>
#include "error/s2n_errno.h"
@@ -24,53 +24,376 @@
#include "tls/s2n_alerts.h"
#include "tls/s2n_tls.h"
#include "tls/s2n_resume.h"
+#include "tls/s2n_tls13_handshake.h"
+#include "tls/s2n_record.h"
#include "stuffer/s2n_stuffer.h"
#include "utils/s2n_safety.h"
#include "utils/s2n_random.h"
+/*
+ * The maximum size of the NewSessionTicket message, not taking into account the
+ * ticket itself.
+ *
+ * To get the actual maximum size required for the NewSessionTicket message, we'll need
+ * to add the size of the ticket, which is much less predictable.
+ *
+ * This constant is enforced via unit tests.
+ */
+#define S2N_TLS13_MAX_FIXED_NEW_SESSION_TICKET_SIZE 79
+
int s2n_server_nst_recv(struct s2n_connection *conn) {
- GUARD(s2n_stuffer_read_uint32(&conn->handshake.io, &conn->ticket_lifetime_hint));
+ POSIX_GUARD(s2n_stuffer_read_uint32(&conn->handshake.io, &conn->ticket_lifetime_hint));
uint16_t session_ticket_len;
- GUARD(s2n_stuffer_read_uint16(&conn->handshake.io, &session_ticket_len));
+ POSIX_GUARD(s2n_stuffer_read_uint16(&conn->handshake.io, &session_ticket_len));
if (session_ticket_len > 0) {
- GUARD(s2n_realloc(&conn->client_ticket, session_ticket_len));
+ POSIX_GUARD(s2n_realloc(&conn->client_ticket, session_ticket_len));
+
+ POSIX_GUARD(s2n_stuffer_read(&conn->handshake.io, &conn->client_ticket));
+
+ if (conn->config->session_ticket_cb != NULL) {
+ size_t session_len = s2n_connection_get_session_length(conn);
+
+ /* Alloc some memory for the serialized session ticket */
+ DEFER_CLEANUP(struct s2n_blob mem = { 0 }, s2n_free);
+ POSIX_GUARD(s2n_alloc(&mem, S2N_STATE_FORMAT_LEN + S2N_SESSION_TICKET_SIZE_LEN + \
+ conn->client_ticket.size + S2N_TLS12_STATE_SIZE_IN_BYTES));
+
+ POSIX_GUARD(s2n_connection_get_session(conn, mem.data, session_len));
+ uint32_t session_lifetime = s2n_connection_get_session_ticket_lifetime_hint(conn);
+
+ struct s2n_session_ticket ticket = { .ticket_data = mem, .session_lifetime = session_lifetime };
- GUARD(s2n_stuffer_read(&conn->handshake.io, &conn->client_ticket));
+ POSIX_GUARD(conn->config->session_ticket_cb(conn, conn->config->session_ticket_ctx, &ticket));
+ }
}
- return 0;
+ return S2N_SUCCESS;
}
int s2n_server_nst_send(struct s2n_connection *conn)
{
- uint16_t session_ticket_len = S2N_TICKET_SIZE_IN_BYTES;
- uint8_t data[S2N_TICKET_SIZE_IN_BYTES];
+ uint16_t session_ticket_len = S2N_TLS12_TICKET_SIZE_IN_BYTES;
+ uint8_t data[S2N_TLS12_TICKET_SIZE_IN_BYTES] = { 0 };
struct s2n_blob entry = { .data = data, .size = sizeof(data) };
struct s2n_stuffer to;
uint32_t lifetime_hint_in_secs = (conn->config->encrypt_decrypt_key_lifetime_in_nanos + conn->config->decrypt_key_lifetime_in_nanos) / ONE_SEC_IN_NANOS;
    /* When the server changes its mind mid-handshake, send the lifetime hint and session ticket length as zero */
if (!conn->config->use_tickets) {
- GUARD(s2n_stuffer_write_uint32(&conn->handshake.io, 0));
- GUARD(s2n_stuffer_write_uint16(&conn->handshake.io, 0));
+ POSIX_GUARD(s2n_stuffer_write_uint32(&conn->handshake.io, 0));
+ POSIX_GUARD(s2n_stuffer_write_uint16(&conn->handshake.io, 0));
return 0;
}
if (!s2n_server_sending_nst(conn)) {
- S2N_ERROR(S2N_ERR_SENDING_NST);
+ POSIX_BAIL(S2N_ERR_SENDING_NST);
+ }
+
+ POSIX_GUARD(s2n_stuffer_init(&to, &entry));
+ POSIX_GUARD(s2n_stuffer_write_uint32(&conn->handshake.io, lifetime_hint_in_secs));
+ POSIX_GUARD(s2n_stuffer_write_uint16(&conn->handshake.io, session_ticket_len));
+
+ POSIX_GUARD(s2n_encrypt_session_ticket(conn, &to));
+ POSIX_GUARD(s2n_stuffer_write(&conn->handshake.io, &to.blob));
+
+ /* For parity with TLS1.3, track the single ticket sent.
+ * This simplifies s2n_connection_get_tickets_sent.
+ */
+ conn->tickets_sent++;
+ return S2N_SUCCESS;
+}
+
+S2N_RESULT s2n_tls13_server_nst_send(struct s2n_connection *conn, s2n_blocked_status *blocked)
+{
+ RESULT_ENSURE_REF(conn);
+
+ /* Usually tickets are sent immediately after the handshake.
+ * If possible, reuse the handshake IO stuffer before it's wiped.
+ *
+ * Note: handshake.io isn't explicitly dedicated to only reading or only writing,
+ * so we have to be careful using it outside of s2n_negotiate.
+ * If we use it for writing here, we CAN'T use it for reading any post-handshake messages.
+ */
+ struct s2n_stuffer *nst_stuffer = &conn->handshake.io;
+
+ if (conn->mode != S2N_SERVER || conn->actual_protocol_version < S2N_TLS13 || !conn->config->use_tickets) {
+ return S2N_RESULT_OK;
+ }
+
+ /* No-op if all tickets already sent.
+ * Clean up the stuffer used for the ticket to conserve memory. */
+ if (conn->tickets_to_send == conn->tickets_sent) {
+ RESULT_GUARD_POSIX(s2n_stuffer_resize(nst_stuffer, 0));
+ return S2N_RESULT_OK;
+ }
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# Note that in principle it is possible to continue issuing new tickets
+ *# which indefinitely extend the lifetime of the keying material
+ *# originally derived from an initial non-PSK handshake (which was most
+ *# likely tied to the peer's certificate). It is RECOMMENDED that
+ *# implementations place limits on the total lifetime of such keying
+ *# material; these limits should take into account the lifetime of the
+ *# peer's certificate, the likelihood of intervening revocation, and the
+ *# time since the peer's online CertificateVerify signature.
+ */
+ if (s2n_result_is_error(s2n_psk_validate_keying_material(conn))) {
+ conn->tickets_to_send = conn->tickets_sent;
+ return S2N_RESULT_OK;
+ }
+
+ RESULT_ENSURE(conn->tickets_sent <= conn->tickets_to_send, S2N_ERR_INTEGER_OVERFLOW);
+
+ size_t session_state_size = 0;
+ RESULT_GUARD(s2n_connection_get_session_state_size(conn, &session_state_size));
+ const size_t maximum_nst_size = session_state_size + S2N_TLS13_MAX_FIXED_NEW_SESSION_TICKET_SIZE;
+ if (s2n_stuffer_space_remaining(nst_stuffer) < maximum_nst_size) {
+ RESULT_GUARD_POSIX(s2n_stuffer_resize(nst_stuffer, maximum_nst_size));
+ }
+
+ while (conn->tickets_to_send - conn->tickets_sent > 0) {
+ if (s2n_result_is_error(s2n_tls13_server_nst_write(conn, nst_stuffer))) {
+ return S2N_RESULT_OK;
+ }
+
+ struct s2n_blob nst_blob = { 0 };
+ uint16_t nst_size = s2n_stuffer_data_available(nst_stuffer);
+ uint8_t *nst_data = s2n_stuffer_raw_read(nst_stuffer, nst_size);
+ RESULT_ENSURE_REF(nst_data);
+ RESULT_GUARD_POSIX(s2n_blob_init(&nst_blob, nst_data, nst_size));
+
+ RESULT_GUARD_POSIX(s2n_record_write(conn, TLS_HANDSHAKE, &nst_blob));
+ RESULT_GUARD_POSIX(s2n_flush(conn, blocked));
+ RESULT_GUARD_POSIX(s2n_stuffer_wipe(nst_stuffer));
+ }
+
+ RESULT_GUARD_POSIX(s2n_stuffer_resize(nst_stuffer, 0));
+ return S2N_RESULT_OK;
+}
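The sending loop above serializes one NewSessionTicket at a time, frames it as a handshake record, flushes it, and wipes the scratch stuffer before the next ticket. A rough standalone sketch of that control flow, with the serialization and record I/O hidden behind hypothetical callbacks:

#include <stdbool.h>
#include <stdint.h>

struct ticket_writer {
    uint16_t tickets_sent;
    uint16_t tickets_to_send;
    bool (*write_ticket)(void *ctx);   /* serialize one NewSessionTicket */
    bool (*send_record)(void *ctx);    /* frame it as TLS_HANDSHAKE and flush */
};

static bool send_remaining_tickets(struct ticket_writer *w, void *ctx)
{
    while (w->tickets_sent < w->tickets_to_send) {
        if (!w->write_ticket(ctx) || !w->send_record(ctx)) {
            return false;
        }
        w->tickets_sent++;
    }
    return true;
}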
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# Indicates the lifetime in seconds as a 32-bit
+ *# unsigned integer in network byte order from the time of ticket
+ *# issuance.
+ **/
+static S2N_RESULT s2n_generate_ticket_lifetime(struct s2n_connection *conn, uint32_t *ticket_lifetime)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_MUT(ticket_lifetime);
+
+ uint32_t key_lifetime_in_secs =
+ (conn->config->encrypt_decrypt_key_lifetime_in_nanos + conn->config->decrypt_key_lifetime_in_nanos) / ONE_SEC_IN_NANOS;
+ uint32_t session_lifetime_in_secs = conn->config->session_state_lifetime_in_nanos / ONE_SEC_IN_NANOS;
+ uint32_t key_and_session_min_lifetime = MIN(key_lifetime_in_secs, session_lifetime_in_secs);
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# Servers MUST NOT use any value greater than
+ *# 604800 seconds (7 days).
+ **/
+ *ticket_lifetime = MIN(key_and_session_min_lifetime, ONE_WEEK_IN_SEC);
+
+ return S2N_RESULT_OK;
+}
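A worked example of the clamping above: the advertised lifetime is the minimum of the ticket-key lifetime, the session-state lifetime, and the RFC 8446 cap of 604800 seconds (7 days). The sample inputs below are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define ONE_WEEK_IN_SEC 604800

static uint32_t ticket_lifetime(uint32_t key_lifetime_secs, uint32_t session_lifetime_secs)
{
    uint32_t lifetime = key_lifetime_secs < session_lifetime_secs ? key_lifetime_secs : session_lifetime_secs;
    return lifetime < ONE_WEEK_IN_SEC ? lifetime : ONE_WEEK_IN_SEC;
}

int main(void)
{
    /* 14 days of key lifetime, 10 days of session lifetime -> clamped to 7 days. */
    printf("%u\n", ticket_lifetime(14 * 86400, 10 * 86400));   /* prints 604800 */
    return 0;
}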
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# A per-ticket value that is unique across all tickets
+ *# issued on this connection.
+ **/
+static S2N_RESULT s2n_generate_ticket_nonce(uint16_t value, struct s2n_blob *output)
+{
+ RESULT_ENSURE_MUT(output);
+
+ struct s2n_stuffer stuffer = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&stuffer, output));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint16(&stuffer, value));
+
+ return S2N_RESULT_OK;
+}
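The nonce above is simply the per-connection ticket counter serialized as a big-endian 16-bit value, which keeps it unique across all tickets issued on the connection. A minimal sketch:

#include <stdint.h>

static void ticket_nonce_from_counter(uint16_t tickets_sent, uint8_t nonce[2])
{
    nonce[0] = (uint8_t)(tickets_sent >> 8);     /* network byte order: high byte first */
    nonce[1] = (uint8_t)(tickets_sent & 0xFF);
}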
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# A securely generated, random 32-bit value that is
+ *# used to obscure the age of the ticket that the client includes in
+ *# the "pre_shared_key" extension.
+ **/
+static S2N_RESULT s2n_generate_ticket_age_add(struct s2n_blob *random_data, uint32_t *ticket_age_add)
+{
+ RESULT_ENSURE_REF(random_data);
+ RESULT_ENSURE_REF(ticket_age_add);
+
+ struct s2n_stuffer stuffer = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_init(&stuffer, random_data));
+ RESULT_GUARD_POSIX(s2n_stuffer_skip_write(&stuffer, random_data->size));
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint32(&stuffer, ticket_age_add));
+
+ return S2N_RESULT_OK;
+}
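The ticket_age_add value above is four freshly drawn random bytes read back as a big-endian 32-bit integer; the caller must source those bytes from a CSPRNG. A minimal sketch of the conversion:

#include <stdint.h>

static uint32_t ticket_age_add_from_random(const uint8_t random_bytes[4])
{
    return ((uint32_t)random_bytes[0] << 24)
         | ((uint32_t)random_bytes[1] << 16)
         | ((uint32_t)random_bytes[2] << 8)
         | ((uint32_t)random_bytes[3]);
}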
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# The PSK associated with the ticket is computed as:
+ *#
+ *# HKDF-Expand-Label(resumption_master_secret,
+ *# "resumption", ticket_nonce, Hash.length)
+ **/
+static int s2n_generate_session_secret(struct s2n_connection *conn, struct s2n_blob *nonce, struct s2n_blob *output)
+{
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(nonce);
+ POSIX_ENSURE_REF(output);
+
+ s2n_tls13_connection_keys(secrets, conn);
+ struct s2n_blob master_secret = { 0 };
+ POSIX_GUARD(s2n_blob_init(&master_secret, conn->secrets.tls13.resumption_master_secret, secrets.size));
+ POSIX_GUARD(s2n_realloc(output, secrets.size));
+ POSIX_GUARD_RESULT(s2n_tls13_derive_session_ticket_secret(&secrets, &master_secret, nonce, output));
+
+ return S2N_SUCCESS;
+}
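The derivation above is HKDF-Expand-Label(resumption_master_secret, "resumption", ticket_nonce, Hash.length). A sketch of the HkdfLabel "info" buffer that feeds HKDF-Expand per RFC 8446 section 7.1; the HKDF call itself is omitted:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t build_resumption_hkdf_label(uint16_t out_len,
                                          const uint8_t *nonce, uint8_t nonce_len,
                                          uint8_t *info, size_t info_cap)
{
    static const char label[] = "tls13 resumption";   /* "tls13 " prefix + label */
    size_t label_len = sizeof(label) - 1;
    size_t total = 2 + 1 + label_len + 1 + nonce_len;
    if (info_cap < total) {
        return 0;
    }
    info[0] = (uint8_t)(out_len >> 8);     /* uint16 output length, network order */
    info[1] = (uint8_t)(out_len & 0xFF);
    info[2] = (uint8_t)label_len;          /* length-prefixed label */
    memcpy(info + 3, label, label_len);
    info[3 + label_len] = nonce_len;       /* length-prefixed context = ticket nonce */
    memcpy(info + 4 + label_len, nonce, nonce_len);
    return total;
}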
+
+S2N_RESULT s2n_tls13_server_nst_write(struct s2n_connection *conn, struct s2n_stuffer *output)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(output);
+
+ struct s2n_ticket_fields *ticket_fields = &conn->tls13_ticket_fields;
+
+ /* Write message type because session resumption in TLS13 is a post-handshake message */
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(output, TLS_SERVER_NEW_SESSION_TICKET));
+
+ struct s2n_stuffer_reservation message_size = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_reserve_uint24(output, &message_size));
+
+ uint32_t ticket_lifetime_in_secs = 0;
+ RESULT_GUARD(s2n_generate_ticket_lifetime(conn, &ticket_lifetime_in_secs));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint32(output, ticket_lifetime_in_secs));
+
+ /* Get random data to use as ticket_age_add value */
+ uint8_t data[sizeof(uint32_t)] = { 0 };
+ struct s2n_blob random_data = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&random_data, data, sizeof(data)));
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# The server MUST generate a fresh value
+ *# for each ticket it sends.
+ **/
+ RESULT_GUARD(s2n_get_private_random_data(&random_data));
+ RESULT_GUARD(s2n_generate_ticket_age_add(&random_data, &ticket_fields->ticket_age_add));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint32(output, ticket_fields->ticket_age_add));
+
+ /* Write ticket nonce */
+ uint8_t nonce_data[sizeof(uint16_t)] = { 0 };
+ struct s2n_blob nonce = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&nonce, nonce_data, sizeof(nonce_data)));
+ RESULT_GUARD(s2n_generate_ticket_nonce(conn->tickets_sent, &nonce));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_uint8(output, nonce.size));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_bytes(output, nonce.data, nonce.size));
+
+ /* Derive individual session ticket secret */
+ RESULT_GUARD_POSIX(s2n_generate_session_secret(conn, &nonce, &ticket_fields->session_secret));
+
+ /* Write ticket */
+ struct s2n_stuffer_reservation ticket_size = { 0 };
+ RESULT_GUARD_POSIX(s2n_stuffer_reserve_uint16(output, &ticket_size));
+ RESULT_GUARD_POSIX(s2n_encrypt_session_ticket(conn, output));
+ RESULT_GUARD_POSIX(s2n_stuffer_write_vector_size(&ticket_size));
+
+ RESULT_GUARD_POSIX(s2n_extension_list_send(S2N_EXTENSION_LIST_NST, conn, output));
+
+ RESULT_GUARD_POSIX(s2n_stuffer_write_vector_size(&message_size));
+
+ RESULT_ENSURE(conn->tickets_sent < UINT16_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ conn->tickets_sent++;
+
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# struct {
+ *# uint32 ticket_lifetime;
+ *# uint32 ticket_age_add;
+ *# opaque ticket_nonce<0..255>;
+ *# opaque ticket<1..2^16-1>;
+ *# Extension extensions<0..2^16-2>;
+ *# } NewSessionTicket;
+**/
+S2N_RESULT s2n_tls13_server_nst_recv(struct s2n_connection *conn, struct s2n_stuffer *input)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(input);
+ RESULT_ENSURE_REF(conn->config);
+
+ RESULT_ENSURE(conn->actual_protocol_version >= S2N_TLS13, S2N_ERR_BAD_MESSAGE);
+ RESULT_ENSURE(conn->mode == S2N_CLIENT, S2N_ERR_BAD_MESSAGE);
+
+ if (!conn->config->use_tickets) {
+ return S2N_RESULT_OK;
+ }
+ struct s2n_ticket_fields *ticket_fields = &conn->tls13_ticket_fields;
+
+ /* Handle `ticket_lifetime` field */
+ uint32_t ticket_lifetime = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint32(input, &ticket_lifetime));
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# Servers MUST NOT use any value greater than
+ *# 604800 seconds (7 days).
+ */
+ RESULT_ENSURE(ticket_lifetime <= ONE_WEEK_IN_SEC, S2N_ERR_BAD_MESSAGE);
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.6.1
+ *# The value of zero indicates that the
+ *# ticket should be discarded immediately.
+ */
+ if (ticket_lifetime == 0) {
+ return S2N_RESULT_OK;
}
+ conn->ticket_lifetime_hint = ticket_lifetime;
- GUARD(s2n_stuffer_init(&to, &entry));
- GUARD(s2n_stuffer_write_uint32(&conn->handshake.io, lifetime_hint_in_secs));
- GUARD(s2n_stuffer_write_uint16(&conn->handshake.io, session_ticket_len));
+ /* Handle `ticket_age_add` field */
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint32(input, &ticket_fields->ticket_age_add));
- GUARD(s2n_encrypt_session_ticket(conn, &to));
- GUARD(s2n_stuffer_write(&conn->handshake.io, &to.blob));
+ /* Handle `ticket_nonce` field */
+ uint8_t ticket_nonce_len = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint8(input, &ticket_nonce_len));
+ uint8_t nonce_data[UINT8_MAX] = { 0 };
+ struct s2n_blob nonce = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&nonce, nonce_data, ticket_nonce_len));
+ RESULT_GUARD_POSIX(s2n_stuffer_read_bytes(input, nonce.data, ticket_nonce_len));
+ RESULT_GUARD_POSIX(s2n_generate_session_secret(conn, &nonce, &ticket_fields->session_secret));
+
+ /* Handle `ticket` field */
+ uint16_t session_ticket_len = 0;
+ RESULT_GUARD_POSIX(s2n_stuffer_read_uint16(input, &session_ticket_len));
+ RESULT_ENSURE(session_ticket_len > 0, S2N_ERR_SAFETY);
+ RESULT_GUARD_POSIX(s2n_realloc(&conn->client_ticket, session_ticket_len));
+ RESULT_GUARD_POSIX(s2n_stuffer_read(input, &conn->client_ticket));
+
+ /* Handle `extensions` field */
+ RESULT_GUARD_POSIX(s2n_extension_list_recv(S2N_EXTENSION_LIST_NST, conn, input));
+
+ if (conn->config->session_ticket_cb != NULL) {
+ /* Retrieve serialized session data */
+ const uint16_t session_state_size = s2n_connection_get_session_length(conn);
+ DEFER_CLEANUP(struct s2n_blob session_state = { 0 }, s2n_free);
+ RESULT_GUARD_POSIX(s2n_realloc(&session_state, session_state_size));
+ RESULT_GUARD_POSIX(s2n_connection_get_session(conn, session_state.data, session_state.size));
+
+ struct s2n_session_ticket ticket = {
+ .ticket_data = session_state,
+ .session_lifetime = ticket_lifetime
+ };
+ RESULT_GUARD_POSIX(conn->config->session_ticket_cb(conn, conn->config->session_ticket_ctx, &ticket));
+ }
- return 0;
+ return S2N_RESULT_OK;
}
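The receive path above walks the NewSessionTicket fields in wire order: lifetime, ticket_age_add, nonce, ticket, extensions. A minimal standalone sketch of parsing just the fixed-size prefix; error handling is reduced to a length check:

#include <stddef.h>
#include <stdint.h>

static size_t parse_nst_prefix(const uint8_t *msg, size_t len,
                               uint32_t *lifetime, uint32_t *age_add,
                               const uint8_t **nonce, uint8_t *nonce_len)
{
    if (len < 9) {
        return 0;
    }
    *lifetime = ((uint32_t)msg[0] << 24) | ((uint32_t)msg[1] << 16)
              | ((uint32_t)msg[2] << 8) | msg[3];
    *age_add = ((uint32_t)msg[4] << 24) | ((uint32_t)msg[5] << 16)
             | ((uint32_t)msg[6] << 8) | msg[7];
    *nonce_len = msg[8];
    if (len < (size_t)9 + *nonce_len) {
        return 0;
    }
    *nonce = msg + 9;
    return 9 + *nonce_len;   /* ticket and extensions follow, not parsed here */
}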
diff --git a/contrib/restricted/aws/s2n/tls/s2n_shutdown.c b/contrib/restricted/aws/s2n/tls/s2n_shutdown.c
index 700d94b1d1..383d3026a5 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_shutdown.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_shutdown.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "tls/s2n_alerts.h"
#include "tls/s2n_connection.h"
@@ -23,8 +23,8 @@
int s2n_shutdown(struct s2n_connection *conn, s2n_blocked_status * more)
{
- notnull_check(conn);
- notnull_check(more);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(more);
/* Treat this call as a no-op if already wiped */
if (conn->send == NULL && conn->recv == NULL) {
@@ -32,24 +32,27 @@ int s2n_shutdown(struct s2n_connection *conn, s2n_blocked_status * more)
}
uint64_t elapsed;
- GUARD_AS_POSIX(s2n_timer_elapsed(conn->config, &conn->write_timer, &elapsed));
+ POSIX_GUARD_RESULT(s2n_timer_elapsed(conn->config, &conn->write_timer, &elapsed));
S2N_ERROR_IF(elapsed < conn->delay, S2N_ERR_SHUTDOWN_PAUSED);
/* Queue our close notify, once. Use warning level so clients don't give up */
- GUARD(s2n_queue_writer_close_alert_warning(conn));
+ POSIX_GUARD(s2n_queue_writer_close_alert_warning(conn));
/* Write it */
- GUARD(s2n_flush(conn, more));
+ POSIX_GUARD(s2n_flush(conn, more));
/* Assume caller isn't interested in pending incoming data */
if (conn->in_status == PLAINTEXT) {
- GUARD(s2n_stuffer_wipe(&conn->header_in));
- GUARD(s2n_stuffer_wipe(&conn->in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->header_in));
+ POSIX_GUARD(s2n_stuffer_wipe(&conn->in));
conn->in_status = ENCRYPTED;
}
- /* Fails with S2N_ERR_SHUTDOWN_RECORD_TYPE or S2N_ERR_ALERT on receipt of anything but a close_notify */
- GUARD(s2n_recv_close_notify(conn, more));
+    /* Don't expect to receive another close notify alert if we have already received it */
+ if (!conn->close_notify_received) {
+ /* Fails with S2N_ERR_SHUTDOWN_RECORD_TYPE or S2N_ERR_ALERT on receipt of anything but a close_notify */
+ POSIX_GUARD(s2n_recv_close_notify(conn, more));
+ }
return 0;
}
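The shutdown sequence above sends our close_notify exactly once and only then waits for the peer's, skipping the wait if one was already received. A rough sketch of that ordering with the record-layer plumbing abstracted away behind placeholder callbacks:

#include <stdbool.h>

struct shutdown_ops {
    bool (*send_close_notify)(void *ctx);
    bool (*recv_close_notify)(void *ctx);
};

static bool graceful_shutdown(const struct shutdown_ops *ops, void *ctx,
                              bool close_notify_already_received)
{
    if (!ops->send_close_notify(ctx)) {
        return false;
    }
    if (!close_notify_already_received) {
        return ops->recv_close_notify(ctx);
    }
    return true;
}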
diff --git a/contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.c b/contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.c
index 4645eae5f1..ae367e9670 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.c
@@ -21,7 +21,6 @@
#include "tls/s2n_auth_selection.h"
#include "tls/s2n_cipher_suites.h"
#include "tls/s2n_kex.h"
-#include "tls/s2n_tls_digest_preferences.h"
#include "tls/s2n_signature_algorithms.h"
#include "tls/s2n_signature_scheme.h"
#include "tls/s2n_security_policies.h"
@@ -31,14 +30,19 @@
static int s2n_signature_scheme_valid_to_offer(struct s2n_connection *conn, const struct s2n_signature_scheme *scheme)
{
/* We don't know what protocol version we will eventually negotiate, but we know that it won't be any higher. */
- gte_check(conn->actual_protocol_version, scheme->minimum_protocol_version);
+ POSIX_ENSURE_GTE(conn->actual_protocol_version, scheme->minimum_protocol_version);
+
+ /* QUIC only supports TLS1.3 */
+ if (s2n_connection_is_quic_enabled(conn) && scheme->maximum_protocol_version) {
+ POSIX_ENSURE_GTE(scheme->maximum_protocol_version, S2N_TLS13);
+ }
if (!s2n_is_rsa_pss_signing_supported()) {
- ne_check(scheme->sig_alg, S2N_SIGNATURE_RSA_PSS_RSAE);
+ POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA_PSS_RSAE);
}
if (!s2n_is_rsa_pss_certs_supported()) {
- ne_check(scheme->sig_alg, S2N_SIGNATURE_RSA_PSS_PSS);
+ POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA_PSS_PSS);
}
return 0;
@@ -46,23 +50,23 @@ static int s2n_signature_scheme_valid_to_offer(struct s2n_connection *conn, cons
static int s2n_signature_scheme_valid_to_accept(struct s2n_connection *conn, const struct s2n_signature_scheme *scheme)
{
- notnull_check(scheme);
+ POSIX_ENSURE_REF(scheme);
- GUARD(s2n_signature_scheme_valid_to_offer(conn, scheme));
+ POSIX_GUARD(s2n_signature_scheme_valid_to_offer(conn, scheme));
if (scheme->maximum_protocol_version != S2N_UNKNOWN_PROTOCOL_VERSION) {
- lte_check(conn->actual_protocol_version, scheme->maximum_protocol_version);
+ POSIX_ENSURE_LTE(conn->actual_protocol_version, scheme->maximum_protocol_version);
}
return 0;
}
static int s2n_is_signature_scheme_usable(struct s2n_connection *conn, const struct s2n_signature_scheme *candidate) {
- notnull_check(conn);
- notnull_check(candidate);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(candidate);
- GUARD(s2n_signature_scheme_valid_to_accept(conn, candidate));
- GUARD(s2n_is_sig_scheme_valid_for_auth(conn, candidate));
+ POSIX_GUARD(s2n_signature_scheme_valid_to_accept(conn, candidate));
+ POSIX_GUARD(s2n_is_sig_scheme_valid_for_auth(conn, candidate));
return S2N_SUCCESS;
}
@@ -70,13 +74,13 @@ static int s2n_is_signature_scheme_usable(struct s2n_connection *conn, const str
static int s2n_choose_sig_scheme(struct s2n_connection *conn, struct s2n_sig_scheme_list *peer_wire_prefs,
struct s2n_signature_scheme *chosen_scheme_out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_signature_preferences *signature_preferences = NULL;
- GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
- notnull_check(signature_preferences);
+ POSIX_GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
+ POSIX_ENSURE_REF(signature_preferences);
struct s2n_cipher_suite *cipher_suite = conn->secure.cipher_suite;
- notnull_check(cipher_suite);
+ POSIX_ENSURE_REF(cipher_suite);
for (size_t i = 0; i < signature_preferences->count; i++) {
const struct s2n_signature_scheme *candidate = signature_preferences->signature_schemes[i];
@@ -100,15 +104,15 @@ static int s2n_choose_sig_scheme(struct s2n_connection *conn, struct s2n_sig_sch
}
/* similar to s2n_choose_sig_scheme() without matching client's preference */
-static int s2n_tls13_default_sig_scheme(struct s2n_connection *conn, struct s2n_signature_scheme *chosen_scheme_out)
+int s2n_tls13_default_sig_scheme(struct s2n_connection *conn, struct s2n_signature_scheme *chosen_scheme_out)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_signature_preferences *signature_preferences = NULL;
- GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
- notnull_check(signature_preferences);
+ POSIX_GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
+ POSIX_ENSURE_REF(signature_preferences);
struct s2n_cipher_suite *cipher_suite = conn->secure.cipher_suite;
- notnull_check(cipher_suite);
+ POSIX_ENSURE_REF(cipher_suite);
for (size_t i = 0; i < signature_preferences->count; i++) {
const struct s2n_signature_scheme *candidate = signature_preferences->signature_schemes[i];
@@ -121,18 +125,18 @@ static int s2n_tls13_default_sig_scheme(struct s2n_connection *conn, struct s2n_
return S2N_SUCCESS;
}
- S2N_ERROR(S2N_ERR_INVALID_SIGNATURE_SCHEME);
+ POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_SCHEME);
}
int s2n_get_and_validate_negotiated_signature_scheme(struct s2n_connection *conn, struct s2n_stuffer *in,
struct s2n_signature_scheme *chosen_sig_scheme)
{
uint16_t actual_iana_val;
- GUARD(s2n_stuffer_read_uint16(in, &actual_iana_val));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &actual_iana_val));
const struct s2n_signature_preferences *signature_preferences = NULL;
- GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
- notnull_check(signature_preferences);
+ POSIX_GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
+ POSIX_ENSURE_REF(signature_preferences);
for (size_t i = 0; i < signature_preferences->count; i++) {
const struct s2n_signature_scheme *candidate = signature_preferences->signature_schemes[i];
@@ -150,8 +154,8 @@ int s2n_get_and_validate_negotiated_signature_scheme(struct s2n_connection *conn
/* We require an exact match in TLS 1.3, but all previous versions can fall back to the default SignatureScheme.
* This means that an s2n client will accept the default SignatureScheme from a TLS server, even if the client did
* not send it in its ClientHello. This pre-TLS1.3 behavior is an intentional choice to maximize support. */
- struct s2n_signature_scheme default_scheme;
- GUARD(s2n_choose_default_sig_scheme(conn, &default_scheme));
+ struct s2n_signature_scheme default_scheme = { 0 };
+ POSIX_GUARD(s2n_choose_default_sig_scheme(conn, &default_scheme, S2N_PEER_MODE(conn->mode)));
if ((conn->actual_protocol_version <= S2N_TLS12)
&& (s2n_signature_scheme_valid_to_accept(conn, &default_scheme) == S2N_SUCCESS)
@@ -161,16 +165,21 @@ int s2n_get_and_validate_negotiated_signature_scheme(struct s2n_connection *conn
return S2N_SUCCESS;
}
- S2N_ERROR(S2N_ERR_INVALID_SIGNATURE_SCHEME);
+ POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_SCHEME);
}
-int s2n_choose_default_sig_scheme(struct s2n_connection *conn, struct s2n_signature_scheme *sig_scheme_out)
+int s2n_choose_default_sig_scheme(struct s2n_connection *conn, struct s2n_signature_scheme *sig_scheme_out, s2n_mode signer)
{
- notnull_check(conn);
- notnull_check(conn->secure.cipher_suite);
- notnull_check(sig_scheme_out);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(sig_scheme_out);
- s2n_authentication_method cipher_suite_auth_method = conn->secure.cipher_suite->auth_method;
+ s2n_authentication_method auth_method = 0;
+ if (signer == S2N_CLIENT) {
+ POSIX_GUARD(s2n_get_auth_method_for_cert_type(conn->handshake_params.client_cert_pkey_type, &auth_method));
+ } else {
+ POSIX_ENSURE_REF(conn->secure.cipher_suite);
+ auth_method = conn->secure.cipher_suite->auth_method;
+ }
/* Default our signature digest algorithms. For TLS 1.2 this default is different and may be
* overridden by the signature_algorithms extension. If the server chooses an ECDHE_ECDSA
@@ -178,13 +187,11 @@ int s2n_choose_default_sig_scheme(struct s2n_connection *conn, struct s2n_signat
*/
*sig_scheme_out = s2n_rsa_pkcs1_md5_sha1;
- if (cipher_suite_auth_method == S2N_AUTHENTICATION_ECDSA) {
+ if (auth_method == S2N_AUTHENTICATION_ECDSA) {
*sig_scheme_out = s2n_ecdsa_sha1;
- }
-
- /* Default RSA Hash Algorithm is SHA1 (instead of MD5_SHA1) if TLS 1.2 or FIPS mode */
- if ((conn->actual_protocol_version >= S2N_TLS12 || s2n_is_in_fips_mode())
- && (sig_scheme_out->sig_alg == S2N_SIGNATURE_RSA)) {
+ } else if (conn->actual_protocol_version >= S2N_TLS12) {
+ *sig_scheme_out = s2n_rsa_pkcs1_sha1;
+ } else if (s2n_is_in_fips_mode() && signer == S2N_SERVER) {
*sig_scheme_out = s2n_rsa_pkcs1_sha1;
}
@@ -194,41 +201,39 @@ int s2n_choose_default_sig_scheme(struct s2n_connection *conn, struct s2n_signat
int s2n_choose_sig_scheme_from_peer_preference_list(struct s2n_connection *conn, struct s2n_sig_scheme_list *peer_wire_prefs,
struct s2n_signature_scheme *sig_scheme_out)
{
- notnull_check(conn);
- notnull_check(sig_scheme_out);
-
- struct s2n_signature_scheme chosen_scheme;
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(sig_scheme_out);
+ struct s2n_signature_scheme chosen_scheme = { 0 };
if (conn->actual_protocol_version < S2N_TLS13) {
- GUARD(s2n_choose_default_sig_scheme(conn, &chosen_scheme));
+ POSIX_GUARD(s2n_choose_default_sig_scheme(conn, &chosen_scheme, conn->mode));
} else {
/* Pick a default signature algorithm in TLS 1.3 https://tools.ietf.org/html/rfc8446#section-4.4.2.2 */
- GUARD(s2n_tls13_default_sig_scheme(conn, &chosen_scheme));
+ POSIX_GUARD(s2n_tls13_default_sig_scheme(conn, &chosen_scheme));
}
/* SignatureScheme preference list was first added in TLS 1.2. It will be empty in older TLS versions. */
if (peer_wire_prefs != NULL && peer_wire_prefs->len > 0) {
/* Use a best effort approach to selecting a signature scheme matching client's preferences */
- GUARD(s2n_choose_sig_scheme(conn, peer_wire_prefs, &chosen_scheme));
+ POSIX_GUARD(s2n_choose_sig_scheme(conn, peer_wire_prefs, &chosen_scheme));
}
*sig_scheme_out = chosen_scheme;
-
return S2N_SUCCESS;
}
int s2n_send_supported_sig_scheme_list(struct s2n_connection *conn, struct s2n_stuffer *out)
{
const struct s2n_signature_preferences *signature_preferences = NULL;
- GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
- notnull_check(signature_preferences);
+ POSIX_GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
+ POSIX_ENSURE_REF(signature_preferences);
- GUARD(s2n_stuffer_write_uint16(out, s2n_supported_sig_scheme_list_size(conn)));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, s2n_supported_sig_scheme_list_size(conn)));
for (size_t i = 0; i < signature_preferences->count; i++) {
const struct s2n_signature_scheme *const scheme = signature_preferences->signature_schemes[i];
if (0 == s2n_signature_scheme_valid_to_offer(conn, scheme)) {
- GUARD(s2n_stuffer_write_uint16(out, scheme->iana_value));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, scheme->iana_value));
}
}
@@ -243,8 +248,8 @@ int s2n_supported_sig_scheme_list_size(struct s2n_connection *conn)
int s2n_supported_sig_schemes_count(struct s2n_connection *conn)
{
const struct s2n_signature_preferences *signature_preferences = NULL;
- GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
- notnull_check(signature_preferences);
+ POSIX_GUARD(s2n_connection_get_signature_preferences(conn, &signature_preferences));
+ POSIX_ENSURE_REF(signature_preferences);
uint8_t count = 0;
for (size_t i = 0; i < signature_preferences->count; i++) {
@@ -258,7 +263,7 @@ int s2n_supported_sig_schemes_count(struct s2n_connection *conn)
int s2n_recv_supported_sig_scheme_list(struct s2n_stuffer *in, struct s2n_sig_scheme_list *sig_hash_algs)
{
uint16_t length_of_all_pairs;
- GUARD(s2n_stuffer_read_uint16(in, &length_of_all_pairs));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &length_of_all_pairs));
if (length_of_all_pairs > s2n_stuffer_data_available(in)) {
/* Malformed length, ignore the extension */
return 0;
@@ -266,21 +271,21 @@ int s2n_recv_supported_sig_scheme_list(struct s2n_stuffer *in, struct s2n_sig_sc
if (length_of_all_pairs % 2) {
/* Pairs occur in two byte lengths. Malformed length, ignore the extension and skip ahead */
- GUARD(s2n_stuffer_skip_read(in, length_of_all_pairs));
+ POSIX_GUARD(s2n_stuffer_skip_read(in, length_of_all_pairs));
return 0;
}
int pairs_available = length_of_all_pairs / 2;
if (pairs_available > TLS_SIGNATURE_SCHEME_LIST_MAX_LEN) {
- S2N_ERROR(S2N_ERR_TOO_MANY_SIGNATURE_SCHEMES);
+ POSIX_BAIL(S2N_ERR_TOO_MANY_SIGNATURE_SCHEMES);
}
sig_hash_algs->len = 0;
for (size_t i = 0; i < pairs_available; i++) {
uint16_t sig_scheme = 0;
- GUARD(s2n_stuffer_read_uint16(in, &sig_scheme));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &sig_scheme));
sig_hash_algs->iana_list[sig_hash_algs->len] = sig_scheme;
sig_hash_algs->len += 1;
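For reference, the default-scheme selection in s2n_choose_default_sig_scheme above reduces to a small decision table (restated from the code, not part of the patch):

/* signer auth method         protocol version     default scheme
 * ------------------         ----------------     ----------------------
 * ECDSA                      any                  s2n_ecdsa_sha1
 * RSA                        >= TLS1.2            s2n_rsa_pkcs1_sha1
 * RSA (server, FIPS mode)    <  TLS1.2            s2n_rsa_pkcs1_sha1
 * RSA                        otherwise            s2n_rsa_pkcs1_md5_sha1
 */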
diff --git a/contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.h b/contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.h
index b400977a16..f1533335ba 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_signature_algorithms.h
@@ -15,7 +15,7 @@
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include "crypto/s2n_hash.h"
#include "crypto/s2n_signature.h"
@@ -29,7 +29,9 @@ struct s2n_sig_scheme_list {
uint8_t len;
};
-int s2n_choose_default_sig_scheme(struct s2n_connection *conn, struct s2n_signature_scheme *sig_scheme_out);
+int s2n_choose_default_sig_scheme(struct s2n_connection *conn, struct s2n_signature_scheme *sig_scheme_out, s2n_mode signer);
+int s2n_tls13_default_sig_scheme(struct s2n_connection *conn, struct s2n_signature_scheme *sig_scheme_out);
+
int s2n_choose_sig_scheme_from_peer_preference_list(struct s2n_connection *conn, struct s2n_sig_scheme_list *sig_hash_algs,
struct s2n_signature_scheme *sig_scheme_out);
int s2n_get_and_validate_negotiated_signature_scheme(struct s2n_connection *conn, struct s2n_stuffer *in,
diff --git a/contrib/restricted/aws/s2n/tls/s2n_signature_scheme.c b/contrib/restricted/aws/s2n/tls/s2n_signature_scheme.c
index 911e717127..6ccddacab6 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_signature_scheme.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_signature_scheme.c
@@ -13,7 +13,7 @@
* permissions and limitations under the License.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include "crypto/s2n_hash.h"
#include "crypto/s2n_signature.h"
@@ -339,3 +339,19 @@ const struct s2n_signature_preferences s2n_certificate_signature_preferences_202
.count = s2n_array_len(s2n_sig_scheme_pref_list_20201110),
.signature_schemes = s2n_sig_scheme_pref_list_20201110,
};
+
+/* Based on s2n_sig_scheme_pref_list_20140601 but with all hashes < SHA-384 removed */
+const struct s2n_signature_scheme* const s2n_sig_scheme_pref_list_20210816[] = {
+ /* RSA PKCS1 */
+ &s2n_rsa_pkcs1_sha384,
+ &s2n_rsa_pkcs1_sha512,
+
+ /* ECDSA - TLS 1.2 */
+ &s2n_ecdsa_sha384, /* same iana value as TLS 1.3 s2n_ecdsa_secp384r1_sha384 */
+ &s2n_ecdsa_sha512,
+};
+
+const struct s2n_signature_preferences s2n_signature_preferences_20210816 = {
+ .count = s2n_array_len(s2n_sig_scheme_pref_list_20210816),
+ .signature_schemes = s2n_sig_scheme_pref_list_20210816
+};
diff --git a/contrib/restricted/aws/s2n/tls/s2n_signature_scheme.h b/contrib/restricted/aws/s2n/tls/s2n_signature_scheme.h
index 416f936a6d..3f78d8a500 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_signature_scheme.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_signature_scheme.h
@@ -15,7 +15,7 @@
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include <strings.h>
#include "crypto/s2n_hash.h"
@@ -76,6 +76,7 @@ extern const struct s2n_signature_scheme s2n_rsa_pss_rsae_sha512;
extern const struct s2n_signature_preferences s2n_signature_preferences_20140601;
extern const struct s2n_signature_preferences s2n_signature_preferences_20200207;
extern const struct s2n_signature_preferences s2n_signature_preferences_20201021;
+extern const struct s2n_signature_preferences s2n_signature_preferences_20210816;
extern const struct s2n_signature_preferences s2n_signature_preferences_null;
extern const struct s2n_signature_preferences s2n_certificate_signature_preferences_20201110;
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls.h b/contrib/restricted/aws/s2n/tls/s2n_tls.h
index d74822a236..7f17c8bc7c 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_tls.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls.h
@@ -24,6 +24,7 @@ extern uint8_t s2n_unknown_protocol_version;
extern uint8_t s2n_highest_protocol_version;
extern int s2n_flush(struct s2n_connection *conn, s2n_blocked_status * more);
+int s2n_client_hello_request_recv(struct s2n_connection *conn);
extern int s2n_client_hello_send(struct s2n_connection *conn);
extern int s2n_client_hello_recv(struct s2n_connection *conn);
extern int s2n_establish_session(struct s2n_connection *conn);
@@ -57,6 +58,9 @@ extern int s2n_tls13_cert_verify_recv(struct s2n_connection *conn);
extern int s2n_tls13_cert_verify_send(struct s2n_connection *conn);
extern int s2n_server_nst_send(struct s2n_connection *conn);
extern int s2n_server_nst_recv(struct s2n_connection *conn);
+S2N_RESULT s2n_tls13_server_nst_send(struct s2n_connection *conn, s2n_blocked_status *blocked);
+S2N_RESULT s2n_tls13_server_nst_write(struct s2n_connection *conn, struct s2n_stuffer *output);
+S2N_RESULT s2n_tls13_server_nst_recv(struct s2n_connection *conn, struct s2n_stuffer *input);
extern int s2n_ccs_send(struct s2n_connection *conn);
extern int s2n_basic_ccs_recv(struct s2n_connection *conn);
extern int s2n_server_ccs_recv(struct s2n_connection *conn);
@@ -69,6 +73,8 @@ extern int s2n_tls13_client_finished_send(struct s2n_connection *conn);
extern int s2n_tls13_client_finished_recv(struct s2n_connection *conn);
extern int s2n_tls13_server_finished_send(struct s2n_connection *conn);
extern int s2n_tls13_server_finished_recv(struct s2n_connection *conn);
+extern int s2n_end_of_early_data_send(struct s2n_connection *conn);
+extern int s2n_end_of_early_data_recv(struct s2n_connection *conn);
extern int s2n_process_client_hello(struct s2n_connection *conn);
extern int s2n_handshake_write_header(struct s2n_stuffer *out, uint8_t message_type);
extern int s2n_handshake_finish_header(struct s2n_stuffer *out);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13.c b/contrib/restricted/aws/s2n/tls/s2n_tls13.c
index 52dd99a86a..790ad8cadc 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_tls13.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13.c
@@ -16,6 +16,7 @@
#include "api/s2n.h"
#include "tls/s2n_tls.h"
#include "tls/s2n_tls13.h"
+#include "crypto/s2n_rsa_pss.h"
#include "crypto/s2n_rsa_signing.h"
bool s2n_use_default_tls13_config_flag = false;
@@ -25,6 +26,17 @@ bool s2n_use_default_tls13_config()
return s2n_use_default_tls13_config_flag;
}
+bool s2n_is_tls13_fully_supported()
+{
+ /* Older versions of OpenSSL (e.g. 1.0.2) do not support RSA-PSS, which is required for TLS 1.3. */
+ return s2n_is_rsa_pss_signing_supported() && s2n_is_rsa_pss_certs_supported();
+}
+
+int s2n_get_highest_fully_supported_tls_version()
+{
+ return s2n_is_tls13_fully_supported() ? S2N_TLS13 : S2N_TLS12;
+}
+
/* Allow TLS1.3 to be negotiated, and use the default TLS1.3 security policy.
* This is NOT the default behavior, and this method is deprecated.
*
@@ -33,6 +45,17 @@ bool s2n_use_default_tls13_config()
*/
int s2n_enable_tls13()
{
+ return s2n_enable_tls13_in_test();
+}
+
+/* Allow TLS1.3 to be negotiated, and use the default TLS1.3 security policy.
+ * This is NOT the default behavior, and this method is deprecated.
+ *
+ * Please consider using the default behavior and configuring
+ * TLS1.2/TLS1.3 via explicit security policy instead.
+ */
+int s2n_enable_tls13_in_test()
+{
s2n_highest_protocol_version = S2N_TLS13;
s2n_use_default_tls13_config_flag = true;
return S2N_SUCCESS;
@@ -44,9 +67,9 @@ int s2n_enable_tls13()
* Please consider using the default behavior and configuring
* TLS1.2/TLS1.3 via explicit security policy instead.
*/
-int s2n_disable_tls13()
+int s2n_disable_tls13_in_test()
{
- ENSURE_POSIX(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+ POSIX_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
s2n_highest_protocol_version = S2N_TLS12;
s2n_use_default_tls13_config_flag = false;
return S2N_SUCCESS;
@@ -57,9 +80,9 @@ int s2n_disable_tls13()
* This method is intended for use in existing unit tests when the APIs
* to enable/disable TLS1.3 have already been called.
*/
-int s2n_reset_tls13()
+int s2n_reset_tls13_in_test()
{
- ENSURE_POSIX(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+ POSIX_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
s2n_highest_protocol_version = S2N_TLS13;
s2n_use_default_tls13_config_flag = false;
return S2N_SUCCESS;
@@ -73,3 +96,53 @@ bool s2n_is_valid_tls13_cipher(const uint8_t version[2]) {
*/
return version[0] == 0x13 && version[1] >= 0x01 && version[1] <= 0x05;
}
+
+/* Use middlebox compatibility mode for TLS1.3 by default.
+ * For now, only disable it when QUIC support is enabled.
+ */
+bool s2n_is_middlebox_compat_enabled(struct s2n_connection *conn)
+{
+ return s2n_connection_get_protocol_version(conn) >= S2N_TLS13
+ && !s2n_connection_is_quic_enabled(conn);
+}
+
+S2N_RESULT s2n_connection_validate_tls13_support(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+
+ /* If the underlying libcrypto supports all features of TLS1.3
+ * (including RSA-PSS, which is unsupported by some libraries),
+ * then we can always support TLS1.3.
+ */
+ if (s2n_is_tls13_fully_supported()) {
+ return S2N_RESULT_OK;
+ }
+
+ /*
+ * If the underlying libcrypto doesn't support all features...
+ */
+
+ /* There are some TLS servers in the wild that will choose options not offered by the client.
+ * So a server might choose to use RSA-PSS even if the client does not advertise support for RSA-PSS.
+ * Therefore, only servers can perform TLS1.3 without full feature support.
+ */
+ RESULT_ENSURE(conn->mode == S2N_SERVER, S2N_RSA_PSS_NOT_SUPPORTED);
+
+ /* RSA signatures must use RSA-PSS in TLS1.3.
+ * So RSA-PSS is required for TLS1.3 servers if an RSA certificate is used.
+ */
+ RESULT_ENSURE(!conn->config->is_rsa_cert_configured, S2N_RSA_PSS_NOT_SUPPORTED);
+
+ /* RSA-PSS is also required for TLS1.3 servers if client auth is requested, because the
+ * client might offer an RSA certificate.
+ */
+ s2n_cert_auth_type client_auth_status = S2N_CERT_AUTH_NONE;
+ RESULT_GUARD_POSIX(s2n_connection_get_client_auth_type(conn, &client_auth_status));
+ RESULT_ENSURE(client_auth_status == S2N_CERT_AUTH_NONE, S2N_RSA_PSS_NOT_SUPPORTED);
+
+ return S2N_RESULT_OK;
+}
+
+bool s2n_connection_supports_tls13(struct s2n_connection *conn) {
+ return s2n_result_is_ok(s2n_connection_validate_tls13_support(conn));
+}
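s2n_is_tls13_fully_supported() is an internal helper, but the same check can drive policy selection. An illustrative sketch (not part of the patch) that prefers a TLS1.3-capable policy only when the linked libcrypto supports RSA-PSS; "default_tls13" and "default" are existing s2n security policy names, and config setup plus error handling are omitted:

/* Illustrative only; s2n_is_tls13_fully_supported() is declared in tls/s2n_tls13.h. */
const char *policy = s2n_is_tls13_fully_supported() ? "default_tls13" : "default";
if (s2n_config_set_cipher_preferences(config, policy) < 0) {
    /* handle the error: fall back to another policy or abort */
}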
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13.h b/contrib/restricted/aws/s2n/tls/s2n_tls13.h
index 53dedaf02e..7ad815bd71 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_tls13.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13.h
@@ -42,9 +42,16 @@ extern "C" {
extern uint8_t hello_retry_req_random[S2N_TLS_RANDOM_DATA_LEN];
bool s2n_use_default_tls13_config();
-int s2n_disable_tls13();
-int s2n_reset_tls13();
+bool s2n_is_tls13_fully_supported();
+int s2n_get_highest_fully_supported_tls_version();
+int s2n_enable_tls13_in_test();
+int s2n_disable_tls13_in_test();
+int s2n_reset_tls13_in_test();
bool s2n_is_valid_tls13_cipher(const uint8_t version[2]);
+S2N_RESULT s2n_connection_validate_tls13_support(struct s2n_connection *conn);
+bool s2n_connection_supports_tls13(struct s2n_connection *conn);
+
+bool s2n_is_middlebox_compat_enabled(struct s2n_connection *conn);
bool s2n_is_hello_retry_handshake(struct s2n_connection *conn);
bool s2n_is_hello_retry_message(struct s2n_connection *conn);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13_certificate_verify.c b/contrib/restricted/aws/s2n/tls/s2n_tls13_certificate_verify.c
index 74654f33cf..abc96b8b21 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_tls13_certificate_verify.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13_certificate_verify.c
@@ -60,10 +60,10 @@ int s2n_tls13_cert_verify_send(struct s2n_connection *conn)
if (conn->mode == S2N_SERVER) {
/* Write digital signature */
- GUARD(s2n_tls13_write_cert_verify_signature(conn, &conn->secure.conn_sig_scheme));
+ POSIX_GUARD(s2n_tls13_write_cert_verify_signature(conn, &conn->handshake_params.conn_sig_scheme));
} else {
/* Write digital signature */
- GUARD(s2n_tls13_write_cert_verify_signature(conn, &conn->secure.client_cert_sig_scheme));
+ POSIX_GUARD(s2n_tls13_write_cert_verify_signature(conn, &conn->handshake_params.client_cert_sig_scheme));
}
@@ -72,20 +72,20 @@ int s2n_tls13_cert_verify_send(struct s2n_connection *conn)
int s2n_tls13_write_cert_verify_signature(struct s2n_connection *conn, struct s2n_signature_scheme *chosen_sig_scheme)
{
- notnull_check(conn->handshake_params.our_chain_and_key);
+ POSIX_ENSURE_REF(conn->handshake_params.our_chain_and_key);
/* Write the SignatureScheme out */
struct s2n_stuffer *out = &conn->handshake.io;
- GUARD(s2n_stuffer_write_uint16(out, chosen_sig_scheme->iana_value));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, chosen_sig_scheme->iana_value));
DEFER_CLEANUP(struct s2n_hash_state message_hash = {0}, s2n_hash_free);
- GUARD(s2n_hash_new(&message_hash));
- GUARD(s2n_hash_init(&message_hash, chosen_sig_scheme->hash_alg));
+ POSIX_GUARD(s2n_hash_new(&message_hash));
+ POSIX_GUARD(s2n_hash_init(&message_hash, chosen_sig_scheme->hash_alg));
DEFER_CLEANUP(struct s2n_stuffer unsigned_content = {0}, s2n_stuffer_free);
- GUARD(s2n_tls13_generate_unsigned_cert_verify_content(conn, &unsigned_content, conn->mode));
+ POSIX_GUARD(s2n_tls13_generate_unsigned_cert_verify_content(conn, &unsigned_content, conn->mode));
- GUARD(s2n_hash_update(&message_hash, unsigned_content.blob.data, s2n_stuffer_data_available(&unsigned_content)));
+ POSIX_GUARD(s2n_hash_update(&message_hash, unsigned_content.blob.data, s2n_stuffer_data_available(&unsigned_content)));
S2N_ASYNC_PKEY_SIGN(conn, chosen_sig_scheme->sig_alg, &message_hash, s2n_tls13_write_signature);
}
@@ -94,8 +94,8 @@ int s2n_tls13_write_signature(struct s2n_connection *conn, struct s2n_blob *sign
{
struct s2n_stuffer *out = &conn->handshake.io;
- GUARD(s2n_stuffer_write_uint16(out, signature->size));
- GUARD(s2n_stuffer_write_bytes(out, signature->data, signature->size));
+ POSIX_GUARD(s2n_stuffer_write_uint16(out, signature->size));
+ POSIX_GUARD(s2n_stuffer_write_bytes(out, signature->data, signature->size));
return 0;
}
@@ -104,30 +104,26 @@ int s2n_tls13_generate_unsigned_cert_verify_content(struct s2n_connection *conn,
{
s2n_tls13_connection_keys(tls13_ctx, conn);
- struct s2n_hash_state handshake_hash, hash_copy;
uint8_t hash_digest_length = tls13_ctx.size;
uint8_t digest_out[S2N_MAX_DIGEST_LEN];
/* Get current handshake hash */
- GUARD(s2n_handshake_get_hash_state(conn, tls13_ctx.hash_algorithm, &handshake_hash));
-
- /* Copy current hash content */
- GUARD(s2n_hash_new(&hash_copy));
- GUARD(s2n_hash_copy(&hash_copy, &handshake_hash));
- GUARD(s2n_hash_digest(&hash_copy, digest_out, hash_digest_length));
- GUARD(s2n_hash_free(&hash_copy));
+ POSIX_ENSURE_REF(conn->handshake.hashes);
+ struct s2n_hash_state *hash_state = &conn->handshake.hashes->hash_workspace;
+ POSIX_GUARD_RESULT(s2n_handshake_copy_hash_state(conn, tls13_ctx.hash_algorithm, hash_state));
+ POSIX_GUARD(s2n_hash_digest(hash_state, digest_out, hash_digest_length));
/* Concatenate the content to be signed/verified */
- GUARD(s2n_stuffer_alloc(unsigned_content, hash_digest_length + s2n_tls13_cert_verify_header_length(mode)));
- GUARD(s2n_stuffer_write_bytes(unsigned_content, S2N_CERT_VERIFY_PREFIX, sizeof(S2N_CERT_VERIFY_PREFIX)));
+ POSIX_GUARD(s2n_stuffer_alloc(unsigned_content, hash_digest_length + s2n_tls13_cert_verify_header_length(mode)));
+ POSIX_GUARD(s2n_stuffer_write_bytes(unsigned_content, S2N_CERT_VERIFY_PREFIX, sizeof(S2N_CERT_VERIFY_PREFIX)));
if (mode == S2N_CLIENT) {
- GUARD(s2n_stuffer_write_bytes(unsigned_content, S2N_CLIENT_CERT_VERIFY_CONTEXT, sizeof(S2N_CLIENT_CERT_VERIFY_CONTEXT)));
+ POSIX_GUARD(s2n_stuffer_write_bytes(unsigned_content, S2N_CLIENT_CERT_VERIFY_CONTEXT, sizeof(S2N_CLIENT_CERT_VERIFY_CONTEXT)));
} else {
- GUARD(s2n_stuffer_write_bytes(unsigned_content, S2N_SERVER_CERT_VERIFY_CONTEXT, sizeof(S2N_SERVER_CERT_VERIFY_CONTEXT)));
+ POSIX_GUARD(s2n_stuffer_write_bytes(unsigned_content, S2N_SERVER_CERT_VERIFY_CONTEXT, sizeof(S2N_SERVER_CERT_VERIFY_CONTEXT)));
}
- GUARD(s2n_stuffer_write_bytes(unsigned_content, digest_out, hash_digest_length));
+ POSIX_GUARD(s2n_stuffer_write_bytes(unsigned_content, digest_out, hash_digest_length));
return 0;
}
@@ -144,16 +140,16 @@ int s2n_tls13_cert_verify_recv(struct s2n_connection *conn)
{
if (conn->mode == S2N_SERVER) {
/* Read the algorithm and update sig_scheme */
- GUARD(s2n_get_and_validate_negotiated_signature_scheme(conn, &conn->handshake.io, &conn->secure.client_cert_sig_scheme));
+ POSIX_GUARD(s2n_get_and_validate_negotiated_signature_scheme(conn, &conn->handshake.io, &conn->handshake_params.client_cert_sig_scheme));
/* Read the rest of the signature and verify */
- GUARD(s2n_tls13_cert_read_and_verify_signature(conn, &conn->secure.client_cert_sig_scheme));
+ POSIX_GUARD(s2n_tls13_cert_read_and_verify_signature(conn, &conn->handshake_params.client_cert_sig_scheme));
} else {
/* Read the algorithm and update sig_scheme */
- GUARD(s2n_get_and_validate_negotiated_signature_scheme(conn, &conn->handshake.io, &conn->secure.conn_sig_scheme));
+ POSIX_GUARD(s2n_get_and_validate_negotiated_signature_scheme(conn, &conn->handshake.io, &conn->handshake_params.conn_sig_scheme));
/* Read the rest of the signature and verify */
- GUARD(s2n_tls13_cert_read_and_verify_signature(conn, &conn->secure.conn_sig_scheme));
+ POSIX_GUARD(s2n_tls13_cert_read_and_verify_signature(conn, &conn->handshake_params.conn_sig_scheme));
}
return 0;
@@ -165,32 +161,32 @@ int s2n_tls13_cert_read_and_verify_signature(struct s2n_connection *conn, struct
DEFER_CLEANUP(struct s2n_blob signed_content = {0}, s2n_free);
DEFER_CLEANUP(struct s2n_stuffer unsigned_content = {0}, s2n_stuffer_free);
DEFER_CLEANUP(struct s2n_hash_state message_hash = {0}, s2n_hash_free);
- GUARD(s2n_hash_new(&message_hash));
+ POSIX_GUARD(s2n_hash_new(&message_hash));
/* Get signature size */
uint16_t signature_size;
- GUARD(s2n_stuffer_read_uint16(in, &signature_size));
+ POSIX_GUARD(s2n_stuffer_read_uint16(in, &signature_size));
S2N_ERROR_IF(signature_size > s2n_stuffer_data_available(in), S2N_ERR_BAD_MESSAGE);
/* Get wire signature */
- GUARD(s2n_alloc(&signed_content, signature_size));
+ POSIX_GUARD(s2n_alloc(&signed_content, signature_size));
signed_content.size = signature_size;
- GUARD(s2n_stuffer_read_bytes(in, signed_content.data, signature_size));
+ POSIX_GUARD(s2n_stuffer_read_bytes(in, signed_content.data, signature_size));
/* Verify signature. We send the opposite mode as we are trying to verify what was sent to us */
if (conn->mode == S2N_CLIENT) {
- GUARD(s2n_tls13_generate_unsigned_cert_verify_content(conn, &unsigned_content, S2N_SERVER));
+ POSIX_GUARD(s2n_tls13_generate_unsigned_cert_verify_content(conn, &unsigned_content, S2N_SERVER));
} else {
- GUARD(s2n_tls13_generate_unsigned_cert_verify_content(conn, &unsigned_content, S2N_CLIENT));
+ POSIX_GUARD(s2n_tls13_generate_unsigned_cert_verify_content(conn, &unsigned_content, S2N_CLIENT));
}
- GUARD(s2n_hash_init(&message_hash, chosen_sig_scheme->hash_alg));
- GUARD(s2n_hash_update(&message_hash, unsigned_content.blob.data, s2n_stuffer_data_available(&unsigned_content)));
+ POSIX_GUARD(s2n_hash_init(&message_hash, chosen_sig_scheme->hash_alg));
+ POSIX_GUARD(s2n_hash_update(&message_hash, unsigned_content.blob.data, s2n_stuffer_data_available(&unsigned_content)));
if (conn->mode == S2N_CLIENT) {
- GUARD(s2n_pkey_verify(&conn->secure.server_public_key, chosen_sig_scheme->sig_alg, &message_hash, &signed_content));
+ POSIX_GUARD(s2n_pkey_verify(&conn->handshake_params.server_public_key, chosen_sig_scheme->sig_alg, &message_hash, &signed_content));
} else {
- GUARD(s2n_pkey_verify(&conn->secure.client_public_key, chosen_sig_scheme->sig_alg, &message_hash, &signed_content));
+ POSIX_GUARD(s2n_pkey_verify(&conn->handshake_params.client_public_key, chosen_sig_scheme->sig_alg, &message_hash, &signed_content));
}
return 0;
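For context on what the signatures above cover, RFC 8446 section 4.4.3 defines the CertificateVerify content. A restatement in comment form (the concrete prefix and context-string constants live in s2n_tls13_certificate_verify.c and are not reproduced here):

/*
 * content =  64 bytes of 0x20 (ASCII space)
 *         || context string:
 *              "TLS 1.3, server CertificateVerify"   (server signatures)
 *              "TLS 1.3, client CertificateVerify"   (client signatures)
 *         || 0x00
 *         || Transcript-Hash(Handshake Context, Certificate)
 *
 * s2n hashes this content with the chosen scheme's hash algorithm and then
 * signs or verifies it with the local or peer key, as in the hunks above.
 */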
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.c b/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.c
index 4382c02382..e5b2bd3202 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.c
@@ -15,358 +15,143 @@
#include "tls/s2n_tls13_handshake.h"
#include "tls/s2n_cipher_suites.h"
+#include "tls/s2n_key_log.h"
#include "tls/s2n_security_policies.h"
static int s2n_zero_sequence_number(struct s2n_connection *conn, s2n_mode mode)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
struct s2n_blob sequence_number;
if (mode == S2N_CLIENT) {
- GUARD(s2n_blob_init(&sequence_number, conn->secure.client_sequence_number, sizeof(conn->secure.client_sequence_number)));
+ POSIX_GUARD(s2n_blob_init(&sequence_number, conn->secure.client_sequence_number, sizeof(conn->secure.client_sequence_number)));
} else {
- GUARD(s2n_blob_init(&sequence_number, conn->secure.server_sequence_number, sizeof(conn->secure.server_sequence_number)));
+ POSIX_GUARD(s2n_blob_init(&sequence_number, conn->secure.server_sequence_number, sizeof(conn->secure.server_sequence_number)));
}
- GUARD(s2n_blob_zero(&sequence_number));
+ POSIX_GUARD(s2n_blob_zero(&sequence_number));
return S2N_SUCCESS;
}
int s2n_tls13_mac_verify(struct s2n_tls13_keys *keys, struct s2n_blob *finished_verify, struct s2n_blob *wire_verify)
{
- notnull_check(wire_verify->data);
- eq_check(wire_verify->size, keys->size);
+ POSIX_ENSURE_REF(wire_verify->data);
+ POSIX_ENSURE_EQ(wire_verify->size, keys->size);
S2N_ERROR_IF(!s2n_constant_time_equals(finished_verify->data, wire_verify->data, keys->size), S2N_ERR_BAD_MESSAGE);
- return 0;
-}
-
-/*
- * Initializes the tls13_keys struct
- */
-static int s2n_tls13_keys_init_with_ref(struct s2n_tls13_keys *handshake, s2n_hmac_algorithm alg, uint8_t * extract, uint8_t * derive)
-{
- notnull_check(handshake);
-
- handshake->hmac_algorithm = alg;
- GUARD(s2n_hmac_hash_alg(alg, &handshake->hash_algorithm));
- GUARD(s2n_hash_digest_size(handshake->hash_algorithm, &handshake->size));
- GUARD(s2n_blob_init(&handshake->extract_secret, extract, handshake->size));
- GUARD(s2n_blob_init(&handshake->derive_secret, derive, handshake->size));
- GUARD(s2n_hmac_new(&handshake->hmac));
-
- return 0;
+ return S2N_SUCCESS;
}
int s2n_tls13_keys_from_conn(struct s2n_tls13_keys *keys, struct s2n_connection *conn)
{
- GUARD(s2n_tls13_keys_init_with_ref(keys, conn->secure.cipher_suite->prf_alg, conn->secure.rsa_premaster_secret, conn->secure.master_secret));
-
- return 0;
+ POSIX_GUARD(s2n_tls13_keys_init(keys, conn->secure.cipher_suite->prf_alg));
+ return S2N_SUCCESS;
}
int s2n_tls13_compute_ecc_shared_secret(struct s2n_connection *conn, struct s2n_blob *shared_secret) {
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
const struct s2n_ecc_preferences *ecc_preferences = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_preferences));
- notnull_check(ecc_preferences);
-
- struct s2n_ecc_evp_params *server_key = &conn->secure.server_ecc_evp_params;
- notnull_check(server_key);
- notnull_check(server_key->negotiated_curve);
- /* for now we do this tedious loop to find the matching client key selection.
- * this can be simplified if we get an index or a pointer to a specific key */
- int selection = -1;
- for (int i = 0; i < ecc_preferences->count; i++) {
- if (server_key->negotiated_curve->iana_id == ecc_preferences->ecc_curves[i]->iana_id) {
- selection = i;
- break;
- }
- }
+ POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_preferences));
+ POSIX_ENSURE_REF(ecc_preferences);
- S2N_ERROR_IF(selection < 0, S2N_ERR_BAD_KEY_SHARE);
- struct s2n_ecc_evp_params *client_key = &conn->secure.client_ecc_evp_params[selection];
- notnull_check(client_key);
+ struct s2n_ecc_evp_params *server_key = &conn->kex_params.server_ecc_evp_params;
+ POSIX_ENSURE_REF(server_key);
+ POSIX_ENSURE_REF(server_key->negotiated_curve);
+
+ struct s2n_ecc_evp_params *client_key = &conn->kex_params.client_ecc_evp_params;
+ POSIX_ENSURE_REF(client_key);
+ POSIX_ENSURE_REF(client_key->negotiated_curve);
+
+ POSIX_ENSURE_EQ(server_key->negotiated_curve, client_key->negotiated_curve);
if (conn->mode == S2N_CLIENT) {
- GUARD(s2n_ecc_evp_compute_shared_secret_from_params(client_key, server_key, shared_secret));
+ POSIX_GUARD(s2n_ecc_evp_compute_shared_secret_from_params(client_key, server_key, shared_secret));
} else {
- GUARD(s2n_ecc_evp_compute_shared_secret_from_params(server_key, client_key, shared_secret));
+ POSIX_GUARD(s2n_ecc_evp_compute_shared_secret_from_params(server_key, client_key, shared_secret));
}
- return 0;
+ return S2N_SUCCESS;
}
/* Computes the ECDHE+PQKEM hybrid shared secret as defined in
* https://tools.ietf.org/html/draft-stebila-tls-hybrid-design */
int s2n_tls13_compute_pq_hybrid_shared_secret(struct s2n_connection *conn, struct s2n_blob *shared_secret) {
- notnull_check(conn);
- notnull_check(shared_secret);
+ POSIX_ENSURE_REF(conn);
+ POSIX_ENSURE_REF(shared_secret);
- /* conn->secure.server_ecc_evp_params should be set only during a classic/non-hybrid handshake */
- eq_check(NULL, conn->secure.server_ecc_evp_params.negotiated_curve);
- eq_check(NULL, conn->secure.server_ecc_evp_params.evp_pkey);
+ /* conn->kex_params.server_ecc_evp_params should be set only during a classic/non-hybrid handshake */
+ POSIX_ENSURE_EQ(NULL, conn->kex_params.server_ecc_evp_params.negotiated_curve);
+ POSIX_ENSURE_EQ(NULL, conn->kex_params.server_ecc_evp_params.evp_pkey);
- struct s2n_kem_group_params *server_kem_group_params = &conn->secure.server_kem_group_params;
- notnull_check(server_kem_group_params);
+ struct s2n_kem_group_params *server_kem_group_params = &conn->kex_params.server_kem_group_params;
+ POSIX_ENSURE_REF(server_kem_group_params);
struct s2n_ecc_evp_params *server_ecc_params = &server_kem_group_params->ecc_params;
- notnull_check(server_ecc_params);
+ POSIX_ENSURE_REF(server_ecc_params);
- struct s2n_kem_group_params *client_kem_group_params = conn->secure.chosen_client_kem_group_params;
- notnull_check(client_kem_group_params);
+ struct s2n_kem_group_params *client_kem_group_params = &conn->kex_params.client_kem_group_params;
+ POSIX_ENSURE_REF(client_kem_group_params);
struct s2n_ecc_evp_params *client_ecc_params = &client_kem_group_params->ecc_params;
- notnull_check(client_ecc_params);
+ POSIX_ENSURE_REF(client_ecc_params);
DEFER_CLEANUP(struct s2n_blob ecdhe_shared_secret = { 0 }, s2n_blob_zeroize_free);
/* Compute the ECDHE shared secret, and retrieve the PQ shared secret. */
if (conn->mode == S2N_CLIENT) {
- GUARD(s2n_ecc_evp_compute_shared_secret_from_params(client_ecc_params, server_ecc_params, &ecdhe_shared_secret));
+ POSIX_GUARD(s2n_ecc_evp_compute_shared_secret_from_params(client_ecc_params, server_ecc_params, &ecdhe_shared_secret));
} else {
- GUARD(s2n_ecc_evp_compute_shared_secret_from_params(server_ecc_params, client_ecc_params, &ecdhe_shared_secret));
+ POSIX_GUARD(s2n_ecc_evp_compute_shared_secret_from_params(server_ecc_params, client_ecc_params, &ecdhe_shared_secret));
}
struct s2n_blob *pq_shared_secret = &client_kem_group_params->kem_params.shared_secret;
- notnull_check(pq_shared_secret);
- notnull_check(pq_shared_secret->data);
+ POSIX_ENSURE_REF(pq_shared_secret);
+ POSIX_ENSURE_REF(pq_shared_secret->data);
- const struct s2n_kem_group *negotiated_kem_group = conn->secure.server_kem_group_params.kem_group;
- notnull_check(negotiated_kem_group);
- notnull_check(negotiated_kem_group->kem);
+ const struct s2n_kem_group *negotiated_kem_group = conn->kex_params.server_kem_group_params.kem_group;
+ POSIX_ENSURE_REF(negotiated_kem_group);
+ POSIX_ENSURE_REF(negotiated_kem_group->kem);
- eq_check(pq_shared_secret->size, negotiated_kem_group->kem->shared_secret_key_length);
+ POSIX_ENSURE_EQ(pq_shared_secret->size, negotiated_kem_group->kem->shared_secret_key_length);
/* Construct the concatenated/hybrid shared secret */
uint32_t hybrid_shared_secret_size = ecdhe_shared_secret.size + negotiated_kem_group->kem->shared_secret_key_length;
- GUARD(s2n_alloc(shared_secret, hybrid_shared_secret_size));
+ POSIX_GUARD(s2n_alloc(shared_secret, hybrid_shared_secret_size));
struct s2n_stuffer stuffer_combiner = { 0 };
- GUARD(s2n_stuffer_init(&stuffer_combiner, shared_secret));
- GUARD(s2n_stuffer_write(&stuffer_combiner, &ecdhe_shared_secret));
- GUARD(s2n_stuffer_write(&stuffer_combiner, pq_shared_secret));
-
- /* No longer need PQ shared secret or ECC keys */
- GUARD(s2n_kem_group_free(server_kem_group_params));
- GUARD(s2n_kem_group_free(client_kem_group_params));
+ POSIX_GUARD(s2n_stuffer_init(&stuffer_combiner, shared_secret));
+ POSIX_GUARD(s2n_stuffer_write(&stuffer_combiner, &ecdhe_shared_secret));
+ POSIX_GUARD(s2n_stuffer_write(&stuffer_combiner, pq_shared_secret));
return S2N_SUCCESS;
}
static int s2n_tls13_pq_hybrid_supported(struct s2n_connection *conn) {
- return conn->secure.server_kem_group_params.kem_group != NULL;
+ return conn->kex_params.server_kem_group_params.kem_group != NULL;
}
int s2n_tls13_compute_shared_secret(struct s2n_connection *conn, struct s2n_blob *shared_secret)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
if (s2n_tls13_pq_hybrid_supported(conn)) {
- GUARD(s2n_tls13_compute_pq_hybrid_shared_secret(conn, shared_secret));
+ POSIX_GUARD(s2n_tls13_compute_pq_hybrid_shared_secret(conn, shared_secret));
} else {
- GUARD(s2n_tls13_compute_ecc_shared_secret(conn, shared_secret));
+ POSIX_GUARD(s2n_tls13_compute_ecc_shared_secret(conn, shared_secret));
}
- return S2N_SUCCESS;
-}
+ POSIX_GUARD_RESULT(s2n_connection_wipe_all_keyshares(conn));
-/*
- * This function executes after Server Hello is processed
- * and handshake hashes are computed. It produces and configure
- * the shared secret, handshake secrets, handshake traffic keys,
- * and finished keys.
- */
-int s2n_tls13_handle_handshake_secrets(struct s2n_connection *conn)
-{
- notnull_check(conn);
- const struct s2n_ecc_preferences *ecc_preferences = NULL;
- GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_preferences));
- notnull_check(ecc_preferences);
-
- /* get tls13 key context */
- s2n_tls13_connection_keys(secrets, conn);
-
- /* get shared secret */
- DEFER_CLEANUP(struct s2n_blob shared_secret = { 0 }, s2n_free);
- GUARD(s2n_tls13_compute_shared_secret(conn, &shared_secret));
-
- /* derive early secrets */
- GUARD(s2n_tls13_derive_early_secrets(&secrets, conn->psk_params.chosen_psk));
- /* since early secrets have been computed, PSKs are no longer needed and can be cleaned up */
- GUARD_AS_POSIX(s2n_psk_parameters_wipe(&conn->psk_params));
-
- /* produce handshake secrets */
- s2n_stack_blob(client_hs_secret, secrets.size, S2N_TLS13_SECRET_MAX_LEN);
- s2n_stack_blob(server_hs_secret, secrets.size, S2N_TLS13_SECRET_MAX_LEN);
-
- struct s2n_hash_state hash_state = {0};
- GUARD(s2n_handshake_get_hash_state(conn, secrets.hash_algorithm, &hash_state));
- GUARD(s2n_tls13_derive_handshake_secrets(&secrets, &shared_secret, &hash_state, &client_hs_secret, &server_hs_secret));
-
- /* trigger secret callbacks */
- if (conn->secret_cb && conn->config->quic_enabled) {
- GUARD(conn->secret_cb(conn->secret_cb_context, conn, S2N_CLIENT_HANDSHAKE_TRAFFIC_SECRET,
- client_hs_secret.data, client_hs_secret.size));
- GUARD(conn->secret_cb(conn->secret_cb_context, conn, S2N_SERVER_HANDSHAKE_TRAFFIC_SECRET,
- server_hs_secret.data, server_hs_secret.size));
- }
-
- /* produce handshake traffic keys and configure record algorithm */
- s2n_tls13_key_blob(server_hs_key, conn->secure.cipher_suite->record_alg->cipher->key_material_size);
- struct s2n_blob server_hs_iv = { .data = conn->secure.server_implicit_iv, .size = S2N_TLS13_FIXED_IV_LEN };
- GUARD(s2n_tls13_derive_traffic_keys(&secrets, &server_hs_secret, &server_hs_key, &server_hs_iv));
-
- s2n_tls13_key_blob(client_hs_key, conn->secure.cipher_suite->record_alg->cipher->key_material_size);
- struct s2n_blob client_hs_iv = { .data = conn->secure.client_implicit_iv, .size = S2N_TLS13_FIXED_IV_LEN };
- GUARD(s2n_tls13_derive_traffic_keys(&secrets, &client_hs_secret, &client_hs_key, &client_hs_iv));
-
- GUARD(conn->secure.cipher_suite->record_alg->cipher->init(&conn->secure.server_key));
- GUARD(conn->secure.cipher_suite->record_alg->cipher->init(&conn->secure.client_key));
-
- if (conn->mode == S2N_CLIENT) {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(&conn->secure.server_key, &server_hs_key));
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(&conn->secure.client_key, &client_hs_key));
- } else {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(&conn->secure.server_key, &server_hs_key));
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(&conn->secure.client_key, &client_hs_key));
- }
+ /* It would make more sense to wipe the PSK secrets in s2n_tls13_handle_early_secret,
+ * but at that point we don't know whether the server will send a HelloRetryRequest,
+ * in which case we'll need to use the secrets again.
+ *
+ * Instead, wipe them here when we wipe all the other connection secrets. */
+ POSIX_GUARD_RESULT(s2n_psk_parameters_wipe_secrets(&conn->psk_params));
- /* calculate server + client finished keys and store them in handshake struct */
- struct s2n_blob server_finished_key = { .data = conn->handshake.server_finished, .size = secrets.size };
- struct s2n_blob client_finished_key = { .data = conn->handshake.client_finished, .size = secrets.size };
- GUARD(s2n_tls13_derive_finished_key(&secrets, &server_hs_secret, &server_finished_key));
- GUARD(s2n_tls13_derive_finished_key(&secrets, &client_hs_secret, &client_finished_key));
-
- /* since shared secret has been computed, clean up keys */
- GUARD(s2n_ecc_evp_params_free(&conn->secure.server_ecc_evp_params));
- for (int i = 0; i < ecc_preferences->count; i++) {
- GUARD(s2n_ecc_evp_params_free(&conn->secure.client_ecc_evp_params[i]));
- }
-
- /* According to https://tools.ietf.org/html/rfc8446#section-5.3:
- * Each sequence number is set to zero at the beginning of a connection and
- * whenever the key is changed
- */
- GUARD(s2n_zero_sequence_number(conn, S2N_CLIENT));
- GUARD(s2n_zero_sequence_number(conn, S2N_SERVER));
-
- return 0;
-}
-
-static int s2n_tls13_handle_application_secret(struct s2n_connection *conn, s2n_mode mode)
-{
- /* get tls13 key context */
- s2n_tls13_connection_keys(keys, conn);
- bool is_sending_secret = (mode == conn->mode);
-
- uint8_t *app_secret_data, *implicit_iv_data;
- struct s2n_session_key *session_key;
- s2n_secret_type_t secret_type;
- if (mode == S2N_CLIENT) {
- app_secret_data = conn->secure.client_app_secret;
- implicit_iv_data = conn->secure.client_implicit_iv;
- session_key = &conn->secure.client_key;
- secret_type = S2N_CLIENT_APPLICATION_TRAFFIC_SECRET;
- } else {
- app_secret_data = conn->secure.server_app_secret;
- implicit_iv_data = conn->secure.server_implicit_iv;
- session_key = &conn->secure.server_key;
- secret_type = S2N_SERVER_APPLICATION_TRAFFIC_SECRET;
- }
-
- /* use frozen hashes during the server finished state */
- struct s2n_hash_state *hash_state;
- GUARD_NONNULL(hash_state = &conn->handshake.server_finished_copy);
-
- /* calculate secret */
- struct s2n_blob app_secret = { .data = app_secret_data, .size = keys.size };
- GUARD(s2n_tls13_derive_application_secret(&keys, hash_state, &app_secret, mode));
-
- /* trigger secret callback */
- if (conn->secret_cb && conn->config->quic_enabled) {
- GUARD(conn->secret_cb(conn->secret_cb_context, conn, secret_type,
- app_secret.data, app_secret.size));
- }
-
- /* derive key from secret */
- s2n_tls13_key_blob(app_key, conn->secure.cipher_suite->record_alg->cipher->key_material_size);
- struct s2n_blob app_iv = { .data = implicit_iv_data, .size = S2N_TLS13_FIXED_IV_LEN };
- GUARD(s2n_tls13_derive_traffic_keys(&keys, &app_secret, &app_key, &app_iv));
-
- /* update record algorithm secrets */
- if (is_sending_secret) {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(session_key, &app_key));
- } else {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(session_key, &app_key));
- }
-
- /* According to https://tools.ietf.org/html/rfc8446#section-5.3:
- * Each sequence number is set to zero at the beginning of a connection and
- * whenever the key is changed
- */
- GUARD(s2n_zero_sequence_number(conn, mode));
-
- return S2N_SUCCESS;
-}
-
-/* The application secrets are derived from the master secret, so the
- * master secret must be handled BEFORE the application secrets.
- */
-static int s2n_tls13_handle_master_secret(struct s2n_connection *conn)
-{
- s2n_tls13_connection_keys(keys, conn);
- GUARD(s2n_tls13_extract_master_secret(&keys));
- return S2N_SUCCESS;
-}
-
-static int s2n_tls13_handle_resumption_master_secret(struct s2n_connection *conn)
-{
- s2n_tls13_connection_keys(keys, conn);
-
- struct s2n_hash_state hash_state = {0};
- GUARD(s2n_handshake_get_hash_state(conn, keys.hash_algorithm, &hash_state));
-
- struct s2n_blob resumption_master_secret = {0};
- GUARD(s2n_blob_init(&resumption_master_secret, conn->resumption_master_secret, keys.size));
- GUARD(s2n_tls13_derive_resumption_master_secret(&keys, &hash_state, &resumption_master_secret));
- return S2N_SUCCESS;
-}
-
-int s2n_tls13_handle_secrets(struct s2n_connection *conn)
-{
- notnull_check(conn);
- if (conn->actual_protocol_version < S2N_TLS13) {
- return S2N_SUCCESS;
- }
-
- switch(s2n_conn_get_current_message_type(conn)) {
- case SERVER_HELLO:
- GUARD(s2n_tls13_handle_handshake_secrets(conn));
- /* Set negotiated crypto parameters for encryption */
- conn->server = &conn->secure;
- conn->client = &conn->secure;
- break;
- case SERVER_FINISHED:
- if (conn->mode == S2N_SERVER) {
- GUARD(s2n_tls13_handle_master_secret(conn));
- GUARD(s2n_tls13_handle_application_secret(conn, S2N_SERVER));
- }
- break;
- case CLIENT_FINISHED:
- if (conn->mode == S2N_CLIENT) {
- GUARD(s2n_tls13_handle_master_secret(conn));
- GUARD(s2n_tls13_handle_application_secret(conn, S2N_SERVER));
- }
- GUARD(s2n_tls13_handle_application_secret(conn, S2N_CLIENT));
- GUARD(s2n_tls13_handle_resumption_master_secret(conn));
- break;
- default:
- break;
- }
return S2N_SUCCESS;
}
int s2n_update_application_traffic_keys(struct s2n_connection *conn, s2n_mode mode, keyupdate_status status)
{
- notnull_check(conn);
+ POSIX_ENSURE_REF(conn);
/* get tls13 key context */
s2n_tls13_connection_keys(keys, conn);
@@ -377,28 +162,28 @@ int s2n_update_application_traffic_keys(struct s2n_connection *conn, s2n_mode mo
if (mode == S2N_CLIENT) {
old_key = &conn->secure.client_key;
- GUARD(s2n_blob_init(&old_app_secret, conn->secure.client_app_secret, keys.size));
- GUARD(s2n_blob_init(&app_iv, conn->secure.client_implicit_iv, S2N_TLS13_FIXED_IV_LEN));
+ POSIX_GUARD(s2n_blob_init(&old_app_secret, conn->secrets.tls13.client_app_secret, keys.size));
+ POSIX_GUARD(s2n_blob_init(&app_iv, conn->secure.client_implicit_iv, S2N_TLS13_FIXED_IV_LEN));
} else {
old_key = &conn->secure.server_key;
- GUARD(s2n_blob_init(&old_app_secret, conn->secure.server_app_secret, keys.size));
- GUARD(s2n_blob_init(&app_iv, conn->secure.server_implicit_iv, S2N_TLS13_FIXED_IV_LEN));
+ POSIX_GUARD(s2n_blob_init(&old_app_secret, conn->secrets.tls13.server_app_secret, keys.size));
+ POSIX_GUARD(s2n_blob_init(&app_iv, conn->secure.server_implicit_iv, S2N_TLS13_FIXED_IV_LEN));
}
/* Produce new application secret */
s2n_stack_blob(app_secret_update, keys.size, S2N_TLS13_SECRET_MAX_LEN);
/* Derives next generation of traffic secret */
- GUARD(s2n_tls13_update_application_traffic_secret(&keys, &old_app_secret, &app_secret_update));
+ POSIX_GUARD(s2n_tls13_update_application_traffic_secret(&keys, &old_app_secret, &app_secret_update));
s2n_tls13_key_blob(app_key, conn->secure.cipher_suite->record_alg->cipher->key_material_size);
/* Derives next generation of traffic key */
- GUARD(s2n_tls13_derive_traffic_keys(&keys, &app_secret_update, &app_key, &app_iv));
+ POSIX_GUARD(s2n_tls13_derive_traffic_keys(&keys, &app_secret_update, &app_key, &app_iv));
if (status == RECEIVING) {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(old_key, &app_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->set_decryption_key(old_key, &app_key));
} else {
- GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(old_key, &app_key));
+ POSIX_GUARD(conn->secure.cipher_suite->record_alg->cipher->set_encryption_key(old_key, &app_key));
}
/* According to https://tools.ietf.org/html/rfc8446#section-5.3:
@@ -406,12 +191,12 @@ int s2n_update_application_traffic_keys(struct s2n_connection *conn, s2n_mode mo
* whenever the key is changed; the first record transmitted under a particular traffic key
* MUST use sequence number 0.
*/
- GUARD(s2n_zero_sequence_number(conn, mode));
+ POSIX_GUARD(s2n_zero_sequence_number(conn, mode));
/* Save updated secret */
struct s2n_stuffer old_secret_stuffer = {0};
- GUARD(s2n_stuffer_init(&old_secret_stuffer, &old_app_secret));
- GUARD(s2n_stuffer_write_bytes(&old_secret_stuffer, app_secret_update.data, keys.size));
+ POSIX_GUARD(s2n_stuffer_init(&old_secret_stuffer, &old_app_secret));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&old_secret_stuffer, app_secret_update.data, keys.size));
return S2N_SUCCESS;
}
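The key-update path above implements RFC 8446 section 7.2. For reference, the derivation it performs (restated, not part of the patch):

/*
 * application_traffic_secret_N+1 =
 *     HKDF-Expand-Label(application_traffic_secret_N, "traffic upd", "", Hash.length)
 *
 * The new secret then feeds the usual key/IV expansion ("key" and "iv" labels,
 * as in s2n_tls13_derive_traffic_keys), and the record sequence number for
 * that direction resets to zero.
 */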
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.h b/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.h
index f496677a09..e9785de6d0 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13_handshake.h
@@ -25,16 +25,16 @@ int s2n_tls13_mac_verify(struct s2n_tls13_keys *keys, struct s2n_blob *finished_
#define s2n_get_hash_state(hash_state, alg, conn) \
struct s2n_hash_state hash_state = {0}; \
- GUARD(s2n_handshake_get_hash_state(conn, alg, &hash_state));
+ POSIX_GUARD(s2n_handshake_get_hash_state(conn, alg, &hash_state));
/* Creates a reference to tls13_keys from connection */
#define s2n_tls13_connection_keys(keys, conn) \
DEFER_CLEANUP(struct s2n_tls13_keys keys = {0}, s2n_tls13_keys_free);\
- GUARD(s2n_tls13_keys_from_conn(&keys, conn));
+ POSIX_GUARD(s2n_tls13_keys_from_conn(&keys, conn));
int s2n_tls13_keys_from_conn(struct s2n_tls13_keys *keys, struct s2n_connection *conn);
-int s2n_tls13_handle_secrets(struct s2n_connection *conn);
+int s2n_tls13_compute_shared_secret(struct s2n_connection *conn, struct s2n_blob *shared_secret);
int s2n_update_application_traffic_keys(struct s2n_connection *conn, s2n_mode mode, keyupdate_status status);
int s2n_server_hello_retry_recreate_transcript(struct s2n_connection *conn);
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.c b/contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.c
new file mode 100644
index 0000000000..16fd78c042
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "tls/s2n_tls13_handshake.h"
+#include "utils/s2n_result.h"
+
+/* The state machine refers to the "master" secret as the "application" secret.
+ * Let's use that terminology here to match.
+ */
+#define S2N_APPLICATION_SECRET S2N_MASTER_SECRET
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A
+ *# The notation "K_{send,recv} = foo" means "set
+ *# the send/recv key to the given key".
+ */
+#define K_send(conn, secret_type) RESULT_GUARD(s2n_set_key(conn, secret_type, (conn)->mode))
+#define K_recv(conn, secret_type) RESULT_GUARD(s2n_set_key(conn, secret_type, S2N_PEER_MODE((conn)->mode)))
+
+static const struct s2n_blob s2n_zero_length_context = { 0 };
+
+static S2N_RESULT s2n_zero_sequence_number(struct s2n_connection *conn, s2n_mode mode)
+{
+ RESULT_ENSURE_REF(conn);
+ struct s2n_blob sequence_number;
+ if (mode == S2N_CLIENT) {
+ RESULT_GUARD_POSIX(s2n_blob_init(&sequence_number,
+ conn->secure.client_sequence_number, sizeof(conn->secure.client_sequence_number)));
+ } else {
+ RESULT_GUARD_POSIX(s2n_blob_init(&sequence_number,
+ conn->secure.server_sequence_number, sizeof(conn->secure.server_sequence_number)));
+ }
+ RESULT_GUARD_POSIX(s2n_blob_zero(&sequence_number));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_set_key(struct s2n_connection *conn, s2n_extract_secret_type_t secret_type, s2n_mode mode)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->secure.cipher_suite);
+ const struct s2n_cipher_suite *cipher_suite = conn->secure.cipher_suite;
+ RESULT_ENSURE_REF(conn->secure.cipher_suite->record_alg);
+ RESULT_ENSURE_REF(conn->secure.cipher_suite->record_alg->cipher);
+ const struct s2n_cipher *cipher = conn->secure.cipher_suite->record_alg->cipher;
+
+ uint8_t *implicit_iv_data = NULL;
+ struct s2n_session_key *session_key = NULL;
+ if (mode == S2N_CLIENT) {
+ implicit_iv_data = conn->secure.client_implicit_iv;
+ session_key = &conn->secure.client_key;
+ conn->client = &conn->secure;
+ } else {
+ implicit_iv_data = conn->secure.server_implicit_iv;
+ session_key = &conn->secure.server_key;
+ conn->server = &conn->secure;
+ }
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.3
+ *# The traffic keying material is generated from the following input
+ *# values:
+ *#
+ *# - A secret value
+ **/
+ struct s2n_blob secret = { 0 };
+ uint8_t secret_bytes[S2N_TLS13_SECRET_MAX_LEN] = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&secret, secret_bytes, S2N_TLS13_SECRET_MAX_LEN));
+ RESULT_GUARD(s2n_tls13_secrets_get(conn, secret_type, mode, &secret));
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.3
+ *#
+ *# - A purpose value indicating the specific value being generated
+ **/
+ const struct s2n_blob *key_purpose = &s2n_tls13_label_traffic_secret_key;
+ const struct s2n_blob *iv_purpose = &s2n_tls13_label_traffic_secret_iv;
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.3
+ *#
+ *# - The length of the key being generated
+ **/
+ const uint32_t key_size = cipher->key_material_size;
+ const uint32_t iv_size = S2N_TLS13_FIXED_IV_LEN;
+
+ /*
+ * TODO: We should be able to reuse the prf_work_space rather
+ * than allocating a new HMAC every time.
+ * https://github.com/aws/s2n-tls/issues/3206
+ */
+ s2n_hmac_algorithm hmac_alg = cipher_suite->prf_alg;
+ DEFER_CLEANUP(struct s2n_hmac_state hmac = { 0 }, s2n_hmac_free);
+ RESULT_GUARD_POSIX(s2n_hmac_new(&hmac));
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.3
+ *#
+ *# The traffic keying material is generated from an input traffic secret
+ *# value using:
+ *#
+ *# [sender]_write_key = HKDF-Expand-Label(Secret, "key", "", key_length)
+ **/
+ struct s2n_blob key = { 0 };
+ uint8_t key_bytes[S2N_TLS13_SECRET_MAX_LEN] = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&key, key_bytes, key_size));
+ RESULT_GUARD_POSIX(s2n_hkdf_expand_label(&hmac, hmac_alg,
+ &secret, key_purpose, &s2n_zero_length_context, &key));
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.3
+ *# [sender]_write_iv = HKDF-Expand-Label(Secret, "iv", "", iv_length)
+ **/
+ struct s2n_blob iv = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&iv, implicit_iv_data, iv_size));
+ RESULT_GUARD_POSIX(s2n_hkdf_expand_label(&hmac, hmac_alg,
+ &secret, iv_purpose, &s2n_zero_length_context, &iv));
+
+ bool is_sending_secret = (mode == conn->mode);
+ if (is_sending_secret) {
+ RESULT_GUARD_POSIX(cipher->set_encryption_key(session_key, &key));
+ } else {
+ RESULT_GUARD_POSIX(cipher->set_decryption_key(session_key, &key));
+ }
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-5.3
+ *# Each sequence number is
+ *# set to zero at the beginning of a connection and whenever the key is
+ *# changed; the first record transmitted under a particular traffic key
+ *# MUST use sequence number 0.
+ */
+ RESULT_GUARD(s2n_zero_sequence_number(conn, mode));
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_client_key_schedule(struct s2n_connection *conn)
+{
+ message_type_t message_type = s2n_conn_get_current_message_type(conn);
+
+ /**
+ * How client keys are set varies depending on early data state.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A
+ *# Actions which are taken only in certain circumstances
+ *# are indicated in [].
+ */
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.1
+ *# START <----+
+ *# Send ClientHello | | Recv HelloRetryRequest
+ *# [K_send = early data] | |
+ */
+ if (message_type == CLIENT_HELLO
+ && conn->early_data_state == S2N_EARLY_DATA_REQUESTED) {
+ K_send(conn, S2N_EARLY_SECRET);
+ }
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.1
+ *# v |
+ *# / WAIT_SH ----+
+ *# | | Recv ServerHello
+ *# | | K_recv = handshake
+ */
+ if (message_type == SERVER_HELLO) {
+ K_recv(conn, S2N_HANDSHAKE_SECRET);
+ }
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.1
+ *# Can | V
+ *# send | WAIT_EE
+ *# early | | Recv EncryptedExtensions
+ *# data | +--------+--------+
+ *# | Using | | Using certificate
+ *# | PSK | v
+ *# | | WAIT_CERT_CR
+ *# | | Recv | | Recv CertificateRequest
+ *# | | Certificate | v
+ *# | | | WAIT_CERT
+ *# | | | | Recv Certificate
+ *# | | v v
+ *# | | WAIT_CV
+ *# | | | Recv CertificateVerify
+ *# | +> WAIT_FINISHED <+
+ *# | | Recv Finished
+ *# \ | [Send EndOfEarlyData]
+ *# | K_send = handshake
+ */
+ if ((message_type == SERVER_FINISHED && !WITH_EARLY_DATA(conn))
+ || (message_type == END_OF_EARLY_DATA)) {
+ K_send(conn, S2N_HANDSHAKE_SECRET);
+ }
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.1
+ *# | [Send Certificate [+ CertificateVerify]]
+ *# Can send | Send Finished
+ *# app data --> | K_send = K_recv = application
+ */
+ if (message_type == CLIENT_FINISHED) {
+ K_send(conn, S2N_APPLICATION_SECRET);
+ K_recv(conn, S2N_APPLICATION_SECRET);
+ }
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.1
+ *# after here v
+ *# CONNECTED
+ */
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_server_key_schedule(struct s2n_connection *conn)
+{
+ message_type_t message_type = s2n_conn_get_current_message_type(conn);
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.2
+ *# START <-----+
+ *# Recv ClientHello | | Send HelloRetryRequest
+ *# v |
+ *# RECVD_CH ----+
+ *# | Select parameters
+ *# v
+ *# NEGOTIATED
+ *# | Send ServerHello
+ *# | K_send = handshake
+ */
+ if (message_type == SERVER_HELLO) {
+ K_send(conn, S2N_HANDSHAKE_SECRET);
+ }
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.2
+ *# | Send EncryptedExtensions
+ *# | [Send CertificateRequest]
+ *# Can send | [Send Certificate + CertificateVerify]
+ *# app data | Send Finished
+ *# after --> | K_send = application
+ */
+ if (message_type == SERVER_FINISHED) {
+ K_send(conn, S2N_APPLICATION_SECRET);
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.2
+ *# here +--------+--------+
+ *# No 0-RTT | | 0-RTT
+ *# | |
+ *# K_recv = handshake | | K_recv = early data
+ */
+ if (WITH_EARLY_DATA(conn)) {
+ K_recv(conn, S2N_EARLY_SECRET);
+ } else {
+ K_recv(conn, S2N_HANDSHAKE_SECRET);
+ }
+ }
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.2
+ *# [Skip decrypt errors] | +------> WAIT_EOED -+
+ *# | | Recv | | Recv EndOfEarlyData
+ *# | | early data | | K_recv = handshake
+ *# | +------------+ |
+ */
+ if (message_type == END_OF_EARLY_DATA) {
+ K_recv(conn, S2N_HANDSHAKE_SECRET);
+ }
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.2
+ *# | |
+ *# +> WAIT_FLIGHT2 <--------+
+ *# |
+ *# +--------+--------+
+ *# No auth | | Client auth
+ *# | |
+ *# | v
+ *# | WAIT_CERT
+ *# | Recv | | Recv Certificate
+ *# | empty | v
+ *# | Certificate | WAIT_CV
+ *# | | | Recv
+ *# | v | CertificateVerify
+ *# +-> WAIT_FINISHED <---+
+ *# | Recv Finished
+ *# | K_recv = application
+ */
+ if (message_type == CLIENT_FINISHED) {
+ K_recv(conn, S2N_APPLICATION_SECRET);
+ }
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#appendix-A.2
+ *# v
+ *# CONNECTED
+ */
+ return S2N_RESULT_OK;
+}
+
+s2n_result (*key_schedules[])(struct s2n_connection*) = {
+ [S2N_CLIENT] = &s2n_client_key_schedule,
+ [S2N_SERVER] = &s2n_server_key_schedule,
+};
+
+S2N_RESULT s2n_tls13_key_schedule_update(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
+ return S2N_RESULT_OK;
+ }
+ RESULT_ENSURE_REF(key_schedules[conn->mode]);
+ RESULT_GUARD(key_schedules[conn->mode](conn));
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_tls13_key_schedule_reset(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ conn->client = &conn->initial;
+ conn->server = &conn->initial;
+ conn->secrets.tls13.extract_secret_type = S2N_NONE_SECRET;
+ return S2N_RESULT_OK;
+}
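For orientation, the K_send/K_recv macros above expand to s2n_set_key() calls that install the named secret for one direction of the connection. Below is a minimal, self-contained sketch of the client-side schedule they encode; every demo_* name is hypothetical and only mirrors the logic of s2n_client_key_schedule() as added in this file, it is not s2n API.

    /* demo_key_schedule.c -- illustrative sketch only; demo_* names are not s2n API. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { MSG_CLIENT_HELLO, MSG_SERVER_HELLO, MSG_SERVER_FINISHED,
                   MSG_END_OF_EARLY_DATA, MSG_CLIENT_FINISHED } demo_msg;
    typedef enum { SECRET_EARLY, SECRET_HANDSHAKE, SECRET_APPLICATION } demo_secret;

    /* Stand-in for s2n_set_key(): just record which traffic key would be installed. */
    static void demo_install(const char *direction, demo_secret secret)
    {
        static const char *names[] = { "early", "handshake", "application" };
        printf("K_%s = %s\n", direction, names[secret]);
    }

    /* Client-side schedule from RFC 8446 Appendix A.1, as encoded by s2n_client_key_schedule(). */
    static void demo_client_on_message(demo_msg msg, bool requested_early_data, bool using_early_data)
    {
        if (msg == MSG_CLIENT_HELLO && requested_early_data) {
            demo_install("send", SECRET_EARLY);
        }
        if (msg == MSG_SERVER_HELLO) {
            demo_install("recv", SECRET_HANDSHAKE);
        }
        if ((msg == MSG_SERVER_FINISHED && !using_early_data) || msg == MSG_END_OF_EARLY_DATA) {
            demo_install("send", SECRET_HANDSHAKE);
        }
        if (msg == MSG_CLIENT_FINISHED) {
            demo_install("send", SECRET_APPLICATION);
            demo_install("recv", SECRET_APPLICATION);
        }
    }

    int main(void)
    {
        demo_msg flight[] = { MSG_CLIENT_HELLO, MSG_SERVER_HELLO, MSG_SERVER_FINISHED, MSG_CLIENT_FINISHED };
        for (size_t i = 0; i < sizeof(flight) / sizeof(flight[0]); i++) {
            demo_client_on_message(flight[i], false, false);
        }
        return 0;
    }

Without early data, this prints the handshake-key installs after ServerHello and ServerFinished and the switch to application keys after ClientFinished, matching the annotated state machine above.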
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.h b/contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.h
new file mode 100644
index 0000000000..0991be9c06
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13_key_schedule.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "utils/s2n_result.h"
+
+S2N_RESULT s2n_tls13_key_schedule_update(struct s2n_connection *conn);
+S2N_RESULT s2n_tls13_key_schedule_reset(struct s2n_connection *conn);
+
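A plausible call pattern for the two entry points declared here, inferred from this patch (s2n_tls13_secrets_update() and s2n_tls13_key_schedule_update() are the real functions added by this change; the wrapper below is hypothetical):

    /* Hypothetical per-message hook; only the two guarded calls are real s2n functions. */
    static S2N_RESULT demo_after_handshake_message(struct s2n_connection *conn)
    {
        /* First derive whatever secrets the just-processed message unlocks ... */
        RESULT_GUARD(s2n_tls13_secrets_update(conn));
        /* ... then install the matching traffic keys. Both calls are no-ops below TLS 1.3. */
        RESULT_GUARD(s2n_tls13_key_schedule_update(conn));
        return S2N_RESULT_OK;
    }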
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.c b/contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.c
new file mode 100644
index 0000000000..9bbe24ec84
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.c
@@ -0,0 +1,626 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include "tls/s2n_tls13_secrets.h"
+
+#include "tls/s2n_connection.h"
+#include "tls/s2n_key_log.h"
+#include "tls/s2n_tls13_handshake.h"
+#include "utils/s2n_bitmap.h"
+
+#define S2N_MAX_HASHLEN SHA384_DIGEST_LENGTH
+
+#define CONN_HMAC_ALG(conn) ((conn)->secure.cipher_suite->prf_alg)
+#define CONN_SECRETS(conn) ((conn)->secrets.tls13)
+#define CONN_HASHES(conn) ((conn)->handshake.hashes)
+
+#define CONN_SECRET(conn, secret) ( \
+ (struct s2n_blob) { .data = CONN_SECRETS(conn).secret, .size = s2n_get_hash_len(CONN_HMAC_ALG(conn))} )
+#define CONN_HASH(conn, hash) ( \
+ (struct s2n_blob) { .data = CONN_HASHES(conn)->hash, .size = s2n_get_hash_len(CONN_HMAC_ALG(conn))} )
+#define CONN_FINISHED(conn, mode) ( \
+ (struct s2n_blob) { .data = (conn)->handshake.mode##_finished, .size = s2n_get_hash_len(CONN_HMAC_ALG(conn))})
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# If a given secret is not available, then the 0-value consisting of a
+ *# string of Hash.length bytes set to zeros is used.
+ */
+static uint8_t zero_value_bytes[S2N_MAX_HASHLEN] = { 0 };
+#define ZERO_VALUE(hmac_alg) ( \
+ (const struct s2n_blob) { .data = zero_value_bytes, .size = s2n_get_hash_len(hmac_alg)})
+
+/**
+ * When an operation doesn't need an actual transcript hash,
+ * it uses an empty transcript hash as an input instead.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# Note that in some cases a zero-
+ *# length Context (indicated by "") is passed to HKDF-Expand-Label
+ */
+#define EMPTY_CONTEXT(hmac_alg) ( \
+ (const struct s2n_blob) { .data = s2n_get_empty_context(hmac_alg), .size = s2n_get_hash_len(hmac_alg)})
+
+static uint8_t s2n_get_hash_len(s2n_hmac_algorithm hmac_alg)
+{
+ uint8_t hash_size = 0;
+ if (s2n_hmac_digest_size(hmac_alg, &hash_size) != S2N_SUCCESS) {
+ return 0;
+ }
+ return hash_size;
+}
+
+static uint8_t *s2n_get_empty_context(s2n_hmac_algorithm hmac_alg)
+{
+ static uint8_t sha256_empty_digest[S2N_MAX_HASHLEN] = { 0 };
+ static uint8_t sha384_empty_digest[S2N_MAX_HASHLEN] = { 0 };
+
+ switch(hmac_alg) {
+ case S2N_HMAC_SHA256:
+ return sha256_empty_digest;
+ case S2N_HMAC_SHA384:
+ return sha384_empty_digest;
+ default:
+ return NULL;
+ }
+}
+
+static s2n_hmac_algorithm supported_hmacs[] = {
+ S2N_HMAC_SHA256,
+ S2N_HMAC_SHA384
+};
+
+S2N_RESULT s2n_tls13_empty_transcripts_init()
+{
+ DEFER_CLEANUP(struct s2n_hash_state hash = { 0 }, s2n_hash_free);
+ RESULT_GUARD_POSIX(s2n_hash_new(&hash));
+
+ s2n_hash_algorithm hash_alg = S2N_HASH_NONE;
+ for (size_t i = 0; i < s2n_array_len(supported_hmacs); i++) {
+ s2n_hmac_algorithm hmac_alg = supported_hmacs[i];
+ struct s2n_blob digest = EMPTY_CONTEXT(hmac_alg);
+
+ RESULT_GUARD_POSIX(s2n_hmac_hash_alg(hmac_alg, &hash_alg));
+ RESULT_GUARD_POSIX(s2n_hash_init(&hash, hash_alg));
+ RESULT_GUARD_POSIX(s2n_hash_digest(&hash, digest.data, digest.size));
+ }
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_calculate_transcript_digest(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->handshake.hashes);
+
+ s2n_hash_algorithm hash_algorithm = S2N_HASH_NONE;
+ RESULT_ENSURE_REF(conn->secure.cipher_suite);
+ RESULT_GUARD_POSIX(s2n_hmac_hash_alg(conn->secure.cipher_suite->prf_alg, &hash_algorithm));
+
+ uint8_t digest_size = 0;
+ RESULT_GUARD_POSIX(s2n_hash_digest_size(hash_algorithm, &digest_size));
+
+ struct s2n_blob digest = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&digest, CONN_HASHES(conn)->transcript_hash_digest, digest_size));
+
+ struct s2n_hash_state *hash_state = &conn->handshake.hashes->hash_workspace;
+ RESULT_GUARD(s2n_handshake_copy_hash_state(conn, hash_algorithm, hash_state));
+ RESULT_GUARD_POSIX(s2n_hash_digest(hash_state, digest.data, digest.size));
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_extract_secret(s2n_hmac_algorithm hmac_alg,
+ const struct s2n_blob *previous_secret_material, const struct s2n_blob *new_secret_material,
+ struct s2n_blob *output)
+{
+ /*
+ * TODO: We should be able to reuse the prf_work_space rather
+ * than allocating a new HMAC every time.
+ * https://github.com/aws/s2n-tls/issues/3206
+ */
+ DEFER_CLEANUP(struct s2n_hmac_state hmac_state = { 0 }, s2n_hmac_free);
+ RESULT_GUARD_POSIX(s2n_hmac_new(&hmac_state));
+
+ RESULT_GUARD_POSIX(s2n_hkdf_extract(&hmac_state, hmac_alg,
+ previous_secret_material, new_secret_material, output));
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# Derive-Secret(Secret, Label, Messages) =
+ *# HKDF-Expand-Label(Secret, Label,
+ *# Transcript-Hash(Messages), Hash.length)
+ */
+static S2N_RESULT s2n_derive_secret(s2n_hmac_algorithm hmac_alg,
+ const struct s2n_blob *previous_secret_material, const struct s2n_blob *label, const struct s2n_blob *context,
+ struct s2n_blob *output)
+{
+ /*
+ * TODO: We should be able to reuse the prf_work_space rather
+ * than allocating a new HMAC every time.
+ * https://github.com/aws/s2n-tls/issues/3206
+ */
+ DEFER_CLEANUP(struct s2n_hmac_state hmac_state = { 0 }, s2n_hmac_free);
+ RESULT_GUARD_POSIX(s2n_hmac_new(&hmac_state));
+
+ output->size = s2n_get_hash_len(hmac_alg);
+ RESULT_GUARD_POSIX(s2n_hkdf_expand_label(&hmac_state, hmac_alg,
+ previous_secret_material, label, context, output));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_derive_secret_with_context(struct s2n_connection *conn,
+ s2n_extract_secret_type_t input_secret_type, const struct s2n_blob *label, message_type_t transcript_end_msg,
+ struct s2n_blob *output)
+{
+ RESULT_ENSURE(CONN_SECRETS(conn).extract_secret_type == input_secret_type, S2N_ERR_SECRET_SCHEDULE_STATE);
+ RESULT_ENSURE(s2n_conn_get_current_message_type(conn) == transcript_end_msg, S2N_ERR_SECRET_SCHEDULE_STATE);
+ RESULT_GUARD(s2n_derive_secret(CONN_HMAC_ALG(conn), &CONN_SECRET(conn, extract_secret),
+ label, &CONN_HASH(conn, transcript_hash_digest), output));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_derive_secret_without_context(struct s2n_connection *conn,
+ s2n_extract_secret_type_t input_secret_type, struct s2n_blob *output)
+{
+ RESULT_ENSURE(CONN_SECRETS(conn).extract_secret_type == input_secret_type, S2N_ERR_SECRET_SCHEDULE_STATE);
+ RESULT_GUARD(s2n_derive_secret(CONN_HMAC_ALG(conn), &CONN_SECRET(conn, extract_secret),
+ &s2n_tls13_label_derived_secret, &EMPTY_CONTEXT(CONN_HMAC_ALG(conn)), output));
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.4.4
+ *# The key used to compute the Finished message is computed from the
+ *# Base Key defined in Section 4.4 using HKDF (see Section 7.1).
+ *# Specifically:
+ *#
+ *# finished_key =
+ *# HKDF-Expand-Label(BaseKey, "finished", "", Hash.length)
+ **/
+S2N_RESULT s2n_tls13_compute_finished_key(s2n_hmac_algorithm hmac_alg,
+ const struct s2n_blob *base_key, struct s2n_blob *output)
+{
+ /*
+ * TODO: We should be able to reuse the prf_work_space rather
+ * than allocating a new HMAC every time.
+ */
+ DEFER_CLEANUP(struct s2n_hmac_state hmac_state = { 0 }, s2n_hmac_free);
+ RESULT_GUARD_POSIX(s2n_hmac_new(&hmac_state));
+
+ RESULT_GUARD_POSIX(s2n_hkdf_expand_label(&hmac_state, hmac_alg,
+ base_key, &s2n_tls13_label_finished, &(struct s2n_blob){0}, output));
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_trigger_secret_callbacks(struct s2n_connection *conn,
+ const struct s2n_blob *secret, s2n_extract_secret_type_t secret_type, s2n_mode mode)
+{
+ static const s2n_secret_type_t conversions[][2] = {
+ [S2N_EARLY_SECRET] = { S2N_CLIENT_EARLY_TRAFFIC_SECRET, S2N_CLIENT_EARLY_TRAFFIC_SECRET },
+ [S2N_HANDSHAKE_SECRET] = { S2N_SERVER_HANDSHAKE_TRAFFIC_SECRET, S2N_CLIENT_HANDSHAKE_TRAFFIC_SECRET },
+ [S2N_MASTER_SECRET] = { S2N_SERVER_APPLICATION_TRAFFIC_SECRET, S2N_CLIENT_APPLICATION_TRAFFIC_SECRET },
+ };
+ s2n_secret_type_t callback_secret_type = conversions[secret_type][mode];
+
+ if (conn->secret_cb && (s2n_connection_is_quic_enabled(conn) || s2n_in_unit_test())) {
+ RESULT_GUARD_POSIX(conn->secret_cb(conn->secret_cb_context, conn, callback_secret_type,
+ secret->data, secret->size));
+ }
+ s2n_result_ignore(s2n_key_log_tls13_secret(conn, secret, callback_secret_type));
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# 0
+ *# |
+ *# v
+ *# PSK -> HKDF-Extract = Early Secret
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# There are multiple potential Early Secret values, depending on which
+ *# PSK the server ultimately selects. The client will need to compute
+ *# one for each potential PSK
+ */
+S2N_RESULT s2n_extract_early_secret(struct s2n_psk *psk)
+{
+ RESULT_ENSURE_REF(psk);
+ RESULT_GUARD_POSIX(s2n_realloc(&psk->early_secret, s2n_get_hash_len(psk->hmac_alg)));
+ RESULT_GUARD(s2n_extract_secret(psk->hmac_alg,
+ &ZERO_VALUE(psk->hmac_alg),
+ &psk->secret,
+ &psk->early_secret));
+ return S2N_RESULT_OK;
+}
+
+/*
+ * When we require an early secret to derive other secrets,
+ * either retrieve the early secret stored on the chosen / early data PSK
+ * or calculate one using a "zero" PSK.
+ */
+static S2N_RESULT s2n_extract_early_secret_for_schedule(struct s2n_connection *conn)
+{
+ struct s2n_psk *psk = conn->psk_params.chosen_psk;
+ s2n_hmac_algorithm hmac_alg = CONN_HMAC_ALG(conn);
+
+ /*
+ * If the client is sending early data, then the PSK is always assumed
+ * to be the first PSK offered.
+ */
+ if (conn->mode == S2N_CLIENT && conn->early_data_state == S2N_EARLY_DATA_REQUESTED) {
+ RESULT_GUARD(s2n_array_get(&conn->psk_params.psk_list, 0, (void**) &psk));
+ RESULT_ENSURE_REF(psk);
+ }
+
+ /**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# if no PSK is selected, it will then need
+ *# to compute the Early Secret corresponding to the zero PSK.
+ */
+ if (psk == NULL) {
+ RESULT_GUARD(s2n_extract_secret(hmac_alg,
+ &ZERO_VALUE(hmac_alg),
+ &ZERO_VALUE(hmac_alg),
+ &CONN_SECRET(conn, extract_secret)));
+ return S2N_RESULT_OK;
+ }
+
+ /*
+ * The early secret is required to generate or verify a PSK's binder,
+ * so must have already been calculated if a valid PSK exists.
+ * Use the early secret stored on the PSK.
+ */
+ RESULT_ENSURE_EQ(hmac_alg, psk->hmac_alg);
+ RESULT_CHECKED_MEMCPY(CONN_SECRETS(conn).extract_secret, psk->early_secret.data, psk->early_secret.size);
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# |
+ *# +-----> Derive-Secret(., "ext binder" | "res binder", "")
+ *# | = binder_key
+ */
+S2N_RESULT s2n_derive_binder_key(struct s2n_psk *psk, struct s2n_blob *output)
+{
+ const struct s2n_blob *label = &s2n_tls13_label_resumption_psk_binder_key;
+ if (psk->type == S2N_PSK_TYPE_EXTERNAL) {
+ label = &s2n_tls13_label_external_psk_binder_key;
+ }
+ RESULT_GUARD(s2n_extract_early_secret(psk));
+ RESULT_GUARD(s2n_derive_secret(psk->hmac_alg,
+ &psk->early_secret,
+ label,
+ &EMPTY_CONTEXT(psk->hmac_alg),
+ output));
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# |
+ *# +-----> Derive-Secret(., "c e traffic", ClientHello)
+ *# | = client_early_traffic_secret
+ */
+static S2N_RESULT s2n_derive_client_early_traffic_secret(struct s2n_connection *conn, struct s2n_blob *output)
+{
+ RESULT_GUARD(s2n_derive_secret_with_context(conn,
+ S2N_EARLY_SECRET,
+ &s2n_tls13_label_client_early_traffic_secret,
+ CLIENT_HELLO,
+ output));
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# |
+ *# v
+ *# Derive-Secret(., "derived", "")
+ *# |
+ *# v
+ *# (EC)DHE -> HKDF-Extract = Handshake Secret
+ */
+static S2N_RESULT s2n_extract_handshake_secret(struct s2n_connection *conn)
+{
+ struct s2n_blob derived_secret = { 0 };
+ uint8_t derived_secret_bytes[S2N_TLS13_SECRET_MAX_LEN] = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&derived_secret, derived_secret_bytes, S2N_TLS13_SECRET_MAX_LEN));
+ RESULT_GUARD(s2n_derive_secret_without_context(conn, S2N_EARLY_SECRET, &derived_secret));
+
+ DEFER_CLEANUP(struct s2n_blob shared_secret = { 0 }, s2n_blob_zeroize_free);
+ RESULT_GUARD_POSIX(s2n_tls13_compute_shared_secret(conn, &shared_secret));
+
+ RESULT_GUARD(s2n_extract_secret(CONN_HMAC_ALG(conn),
+ &derived_secret,
+ &shared_secret,
+ &CONN_SECRET(conn, extract_secret)));
+
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# |
+ *# +-----> Derive-Secret(., "c hs traffic",
+ *# | ClientHello...ServerHello)
+ *# | = client_handshake_traffic_secret
+ */
+static S2N_RESULT s2n_derive_client_handshake_traffic_secret(struct s2n_connection *conn, struct s2n_blob *output)
+{
+ RESULT_GUARD(s2n_derive_secret_with_context(conn,
+ S2N_HANDSHAKE_SECRET,
+ &s2n_tls13_label_client_handshake_traffic_secret,
+ SERVER_HELLO,
+ output));
+
+ /*
+ * The client finished key needs to be calculated using the
+ * same connection state as the client handshake secret.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.4.4
+ *# The key used to compute the Finished message is computed from the
+ *# Base Key defined in Section 4.4 using HKDF (see Section 7.1).
+ */
+ RESULT_GUARD(s2n_tls13_compute_finished_key(CONN_HMAC_ALG(conn),
+ output, &CONN_FINISHED(conn, client)));
+
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# |
+ *# +-----> Derive-Secret(., "s hs traffic",
+ *# | ClientHello...ServerHello)
+ *# | = server_handshake_traffic_secret
+ */
+static S2N_RESULT s2n_derive_server_handshake_traffic_secret(struct s2n_connection *conn, struct s2n_blob *output)
+{
+ RESULT_GUARD(s2n_derive_secret_with_context(conn,
+ S2N_HANDSHAKE_SECRET,
+ &s2n_tls13_label_server_handshake_traffic_secret,
+ SERVER_HELLO,
+ output));
+
+ /*
+ * The server finished key needs to be calculated using the
+ * same connection state as the server handshake secret.
+ *
+ *= https://tools.ietf.org/rfc/rfc8446#section-4.4.4
+ *# The key used to compute the Finished message is computed from the
+ *# Base Key defined in Section 4.4 using HKDF (see Section 7.1).
+ */
+ RESULT_GUARD(s2n_tls13_compute_finished_key(CONN_HMAC_ALG(conn),
+ output, &CONN_FINISHED(conn, server)));
+
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# v
+ *# Derive-Secret(., "derived", "")
+ *# |
+ *# v
+ *# 0 -> HKDF-Extract = Master Secret
+ */
+static S2N_RESULT s2n_extract_master_secret(struct s2n_connection *conn)
+{
+ struct s2n_blob derived_secret = { 0 };
+ uint8_t derived_secret_bytes[S2N_TLS13_SECRET_MAX_LEN] = { 0 };
+ RESULT_GUARD_POSIX(s2n_blob_init(&derived_secret, derived_secret_bytes, S2N_TLS13_SECRET_MAX_LEN));
+ RESULT_GUARD(s2n_derive_secret_without_context(conn, S2N_HANDSHAKE_SECRET, &derived_secret));
+
+ RESULT_GUARD(s2n_extract_secret(CONN_HMAC_ALG(conn),
+ &derived_secret,
+ &ZERO_VALUE(CONN_HMAC_ALG(conn)),
+ &CONN_SECRET(conn, extract_secret)));
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# |
+ *# +-----> Derive-Secret(., "c ap traffic",
+ *# | ClientHello...server Finished)
+ *# | = client_application_traffic_secret_0
+ */
+static S2N_RESULT s2n_derive_client_application_traffic_secret(struct s2n_connection *conn, struct s2n_blob *output)
+{
+ RESULT_GUARD(s2n_derive_secret_with_context(conn,
+ S2N_MASTER_SECRET,
+ &s2n_tls13_label_client_application_traffic_secret,
+ SERVER_FINISHED,
+ output));
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# |
+ *# +-----> Derive-Secret(., "s ap traffic",
+ *# | ClientHello...server Finished)
+ *# | = server_application_traffic_secret_0
+ */
+static S2N_RESULT s2n_derive_server_application_traffic_secret(struct s2n_connection *conn, struct s2n_blob *output)
+{
+ RESULT_GUARD(s2n_derive_secret_with_context(conn,
+ S2N_MASTER_SECRET,
+ &s2n_tls13_label_server_application_traffic_secret,
+ SERVER_FINISHED,
+ output));
+ return S2N_RESULT_OK;
+}
+
+/**
+ *= https://tools.ietf.org/rfc/rfc8446#section-7.1
+ *# |
+ *# +-----> Derive-Secret(., "res master",
+ *# ClientHello...client Finished)
+ *# = resumption_master_secret
+ */
+S2N_RESULT s2n_derive_resumption_master_secret(struct s2n_connection *conn)
+{
+ RESULT_GUARD(s2n_derive_secret_with_context(conn,
+ S2N_MASTER_SECRET,
+ &s2n_tls13_label_resumption_master_secret,
+ CLIENT_FINISHED,
+ &CONN_SECRET(conn, resumption_master_secret)));
+ return S2N_RESULT_OK;
+}
+
+static s2n_result (*extract_methods[])(struct s2n_connection *conn) = {
+ [S2N_EARLY_SECRET] = &s2n_extract_early_secret_for_schedule,
+ [S2N_HANDSHAKE_SECRET] = &s2n_extract_handshake_secret,
+ [S2N_MASTER_SECRET] = &s2n_extract_master_secret,
+};
+
+S2N_RESULT s2n_tls13_extract_secret(struct s2n_connection *conn, s2n_extract_secret_type_t secret_type)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(conn->secure.cipher_suite);
+ RESULT_ENSURE_REF(conn->handshake.hashes);
+ RESULT_ENSURE_NE(secret_type, S2N_NONE_SECRET);
+
+ RESULT_ENSURE_GTE(secret_type, 0);
+ RESULT_ENSURE_LT(secret_type, s2n_array_len(extract_methods));
+
+ s2n_extract_secret_type_t next_secret_type = CONN_SECRETS(conn).extract_secret_type + 1;
+ for (s2n_extract_secret_type_t i = next_secret_type; i <= secret_type; i++) {
+ RESULT_ENSURE_REF(extract_methods[i]);
+ RESULT_GUARD(extract_methods[i](conn));
+ CONN_SECRETS(conn).extract_secret_type = i;
+ }
+
+ return S2N_RESULT_OK;
+}
+
+static s2n_result (*derive_methods[][2])(struct s2n_connection *conn, struct s2n_blob *secret) = {
+ [S2N_EARLY_SECRET] = { &s2n_derive_client_early_traffic_secret, &s2n_derive_client_early_traffic_secret },
+ [S2N_HANDSHAKE_SECRET] = { &s2n_derive_server_handshake_traffic_secret, &s2n_derive_client_handshake_traffic_secret },
+ [S2N_MASTER_SECRET] = { &s2n_derive_server_application_traffic_secret, &s2n_derive_client_application_traffic_secret },
+};
+
+S2N_RESULT s2n_tls13_derive_secret(struct s2n_connection *conn, s2n_extract_secret_type_t secret_type,
+ s2n_mode mode, struct s2n_blob *secret)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(secret);
+ RESULT_ENSURE_REF(conn->secure.cipher_suite);
+ RESULT_ENSURE_REF(conn->handshake.hashes);
+ RESULT_ENSURE_NE(secret_type, S2N_NONE_SECRET);
+
+ RESULT_GUARD(s2n_tls13_extract_secret(conn, secret_type));
+
+ RESULT_ENSURE_GTE(secret_type, 0);
+ RESULT_ENSURE_LT(secret_type, s2n_array_len(derive_methods));
+ RESULT_ENSURE_REF(derive_methods[secret_type][mode]);
+ RESULT_GUARD(derive_methods[secret_type][mode](conn, secret));
+
+ RESULT_GUARD(s2n_trigger_secret_callbacks(conn, secret, secret_type, mode));
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_tls13_secrets_clean(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ if (conn->actual_protocol_version < S2N_TLS13) {
+ return S2N_RESULT_OK;
+ }
+
+ /*
+ * Wipe base secrets.
+ * Not strictly necessary, but probably safer than leaving them.
+ * A compromised secret additionally compromises all secrets derived from it,
+ * so these are the most sensitive secrets.
+ */
+ RESULT_GUARD_POSIX(s2n_blob_zero(&CONN_SECRET(conn, extract_secret)));
+ conn->secrets.tls13.extract_secret_type = S2N_NONE_SECRET;
+
+ /* Wipe other secrets no longer needed */
+ RESULT_GUARD_POSIX(s2n_blob_zero(&CONN_SECRET(conn, client_early_secret)));
+ RESULT_GUARD_POSIX(s2n_blob_zero(&CONN_SECRET(conn, client_handshake_secret)));
+ RESULT_GUARD_POSIX(s2n_blob_zero(&CONN_SECRET(conn, server_handshake_secret)));
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_tls13_secrets_update(struct s2n_connection *conn)
+{
+ RESULT_ENSURE_REF(conn);
+ if (s2n_connection_get_protocol_version(conn) < S2N_TLS13) {
+ return S2N_RESULT_OK;
+ }
+ RESULT_ENSURE_REF(conn->secure.cipher_suite);
+
+ message_type_t message_type = s2n_conn_get_current_message_type(conn);
+ switch(message_type) {
+ case CLIENT_HELLO:
+ if (conn->early_data_state == S2N_EARLY_DATA_REQUESTED
+ || conn->early_data_state == S2N_EARLY_DATA_ACCEPTED) {
+ RESULT_GUARD(s2n_calculate_transcript_digest(conn));
+ RESULT_GUARD(s2n_tls13_derive_secret(conn, S2N_EARLY_SECRET,
+ S2N_CLIENT, &CONN_SECRET(conn, client_early_secret)));
+ }
+ break;
+ case SERVER_HELLO:
+ RESULT_GUARD(s2n_calculate_transcript_digest(conn));
+ RESULT_GUARD(s2n_tls13_derive_secret(conn, S2N_HANDSHAKE_SECRET,
+ S2N_CLIENT, &CONN_SECRET(conn, client_handshake_secret)));
+ RESULT_GUARD(s2n_tls13_derive_secret(conn, S2N_HANDSHAKE_SECRET,
+ S2N_SERVER, &CONN_SECRET(conn, server_handshake_secret)));
+ break;
+ case SERVER_FINISHED:
+ RESULT_GUARD(s2n_calculate_transcript_digest(conn));
+ RESULT_GUARD(s2n_tls13_derive_secret(conn, S2N_MASTER_SECRET,
+ S2N_CLIENT, &CONN_SECRET(conn, client_app_secret)));
+ RESULT_GUARD(s2n_tls13_derive_secret(conn, S2N_MASTER_SECRET,
+ S2N_SERVER, &CONN_SECRET(conn, server_app_secret)));
+ break;
+ case CLIENT_FINISHED:
+ RESULT_GUARD(s2n_calculate_transcript_digest(conn));
+ RESULT_GUARD(s2n_derive_resumption_master_secret(conn));
+ break;
+ default:
+ break;
+ }
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_tls13_secrets_get(struct s2n_connection *conn, s2n_extract_secret_type_t secret_type,
+ s2n_mode mode, struct s2n_blob *secret)
+{
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(secret);
+
+ uint8_t *secrets[][2] = {
+ [S2N_EARLY_SECRET] = { NULL, CONN_SECRETS(conn).client_early_secret },
+ [S2N_HANDSHAKE_SECRET] = { CONN_SECRETS(conn).server_handshake_secret, CONN_SECRETS(conn).client_handshake_secret },
+ [S2N_MASTER_SECRET] = { CONN_SECRETS(conn).server_app_secret, CONN_SECRETS(conn).client_app_secret },
+ };
+ RESULT_ENSURE_GT(secret_type, S2N_NONE_SECRET);
+ RESULT_ENSURE_LT(secret_type, s2n_array_len(secrets));
+ RESULT_ENSURE_LTE(secret_type, CONN_SECRETS(conn).extract_secret_type);
+ RESULT_ENSURE_REF(secrets[secret_type][mode]);
+
+ secret->size = s2n_get_hash_len(CONN_HMAC_ALG(conn));
+ RESULT_CHECKED_MEMCPY(secret->data, secrets[secret_type][mode], secret->size);
+ RESULT_ENSURE_GT(secret->size, 0);
+ return S2N_RESULT_OK;
+}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.h b/contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.h
new file mode 100644
index 0000000000..37c50a117c
--- /dev/null
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls13_secrets.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include "tls/s2n_crypto_constants.h"
+#include "tls/s2n_quic_support.h"
+#include "utils/s2n_blob.h"
+#include "utils/s2n_result.h"
+
+/* tls/s2n_tls13_secrets should eventually replace crypto/s2n_tls13_keys.h */
+#include "crypto/s2n_tls13_keys.h"
+
+typedef enum {
+ S2N_NONE_SECRET = 0,
+ S2N_EARLY_SECRET,
+ S2N_HANDSHAKE_SECRET,
+ S2N_MASTER_SECRET
+} s2n_extract_secret_type_t;
+
+struct s2n_tls13_secrets {
+ uint8_t extract_secret[S2N_TLS13_SECRET_MAX_LEN];
+ s2n_extract_secret_type_t extract_secret_type;
+
+ uint8_t client_early_secret[S2N_TLS13_SECRET_MAX_LEN];
+ uint8_t client_handshake_secret[S2N_TLS13_SECRET_MAX_LEN];
+ uint8_t server_handshake_secret[S2N_TLS13_SECRET_MAX_LEN];
+
+ uint8_t client_app_secret[S2N_TLS13_SECRET_MAX_LEN];
+ uint8_t server_app_secret[S2N_TLS13_SECRET_MAX_LEN];
+ uint8_t resumption_master_secret[S2N_TLS13_SECRET_MAX_LEN];
+};
+
+S2N_RESULT s2n_tls13_empty_transcripts_init();
+
+S2N_RESULT s2n_tls13_secrets_update(struct s2n_connection *conn);
+S2N_RESULT s2n_tls13_secrets_get(struct s2n_connection *conn, s2n_extract_secret_type_t secret_type,
+ s2n_mode mode, struct s2n_blob *secret);
+S2N_RESULT s2n_tls13_secrets_clean(struct s2n_connection *conn);
+
+S2N_RESULT s2n_derive_binder_key(struct s2n_psk *psk, struct s2n_blob *output);
+S2N_RESULT s2n_derive_resumption_master_secret(struct s2n_connection *conn);
+
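A hedged usage sketch for the accessor declared above, mirroring how s2n_set_key() in this patch reads a negotiated secret; the wrapper function is hypothetical and assumes the handshake has progressed far enough that the requested secret was already extracted.

    static S2N_RESULT demo_read_client_handshake_secret(struct s2n_connection *conn)
    {
        uint8_t secret_bytes[S2N_TLS13_SECRET_MAX_LEN] = { 0 };
        struct s2n_blob secret = { 0 };
        RESULT_GUARD_POSIX(s2n_blob_init(&secret, secret_bytes, sizeof(secret_bytes)));

        /* Fails via the guards in s2n_tls13_secrets_get() if the handshake secret
         * has not been derived yet (extract_secret_type is still behind it). */
        RESULT_GUARD(s2n_tls13_secrets_get(conn, S2N_HANDSHAKE_SECRET, S2N_CLIENT, &secret));

        /* secret.size is now the digest length of the negotiated cipher suite's PRF hash. */
        return S2N_RESULT_OK;
    }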
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls_digest_preferences.h b/contrib/restricted/aws/s2n/tls/s2n_tls_digest_preferences.h
deleted file mode 100644
index 9b856cf481..0000000000
--- a/contrib/restricted/aws/s2n/tls/s2n_tls_digest_preferences.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://aws.amazon.com/apache2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-#pragma once
-
-#include "tls/s2n_tls_parameters.h"
-
-#include "crypto/s2n_hash.h"
-
-/* Table to translate TLS numbers to s2n algorithms */
-static const s2n_hash_algorithm s2n_hash_tls_to_alg[] = {
- [TLS_HASH_ALGORITHM_MD5] = S2N_HASH_MD5,
- [TLS_HASH_ALGORITHM_SHA1] = S2N_HASH_SHA1,
- [TLS_HASH_ALGORITHM_SHA224] = S2N_HASH_SHA224,
- [TLS_HASH_ALGORITHM_SHA256] = S2N_HASH_SHA256,
- [TLS_HASH_ALGORITHM_SHA384] = S2N_HASH_SHA384,
- [TLS_HASH_ALGORITHM_SHA512] = S2N_HASH_SHA512 };
-
-/* Table to translate from s2n algorithm numbers to TLS numbers */
-static const uint8_t s2n_hash_alg_to_tls[] = {
- [S2N_HASH_MD5] = TLS_HASH_ALGORITHM_MD5,
- [S2N_HASH_SHA1] = TLS_HASH_ALGORITHM_SHA1,
- [S2N_HASH_SHA224] = TLS_HASH_ALGORITHM_SHA224,
- [S2N_HASH_SHA256] = TLS_HASH_ALGORITHM_SHA256,
- [S2N_HASH_SHA384] = TLS_HASH_ALGORITHM_SHA384,
- [S2N_HASH_SHA512] = TLS_HASH_ALGORITHM_SHA512 };
-
diff --git a/contrib/restricted/aws/s2n/tls/s2n_tls_parameters.h b/contrib/restricted/aws/s2n/tls/s2n_tls_parameters.h
index 7bce43d0b2..c998d6b1c8 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_tls_parameters.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_tls_parameters.h
@@ -65,20 +65,27 @@
#define TLS_EXTENSION_PQ_KEM_PARAMETERS 0xFE01
#define TLS_PQ_KEM_EXTENSION_ID_BIKE1_L1_R1 1
#define TLS_PQ_KEM_EXTENSION_ID_BIKE1_L1_R2 13
+#define TLS_PQ_KEM_EXTENSION_ID_BIKE1_L1_R3 25
#define TLS_PQ_KEM_EXTENSION_ID_SIKE_P503_R1 10
-#define TLS_PQ_KEM_EXTENSION_ID_SIKE_P434_R2 19
+#define TLS_PQ_KEM_EXTENSION_ID_SIKE_P434_R3 19
#define TLS_PQ_KEM_EXTENSION_ID_KYBER_512_R2 23
#define TLS_PQ_KEM_EXTENSION_ID_KYBER_512_90S_R2 24
+#define TLS_PQ_KEM_EXTENSION_ID_KYBER_512_R3 28
/* TLS 1.3 hybrid post-quantum definitions are from the proposed reserved range defined
- * in https://tools.ietf.org/html/draft-stebila-tls-hybrid-design. Values for interoperability
- * are defined in https://docs.google.com/spreadsheets/d/12YarzaNv3XQNLnvDsWLlRKwtZFhRrDdWf36YlzwrPeg/edit#gid=0. */
-#define TLS_PQ_KEM_GROUP_ID_X25519_SIKE_P434_R2 0x2F27
-#define TLS_PQ_KEM_GROUP_ID_SECP256R1_SIKE_P434_R2 0x2F1F
-#define TLS_PQ_KEM_GROUP_ID_X25519_BIKE1_L1_R2 0x2F28
-#define TLS_PQ_KEM_GROUP_ID_SECP256R1_BIKE1_L1_R2 0x2F23
-#define TLS_PQ_KEM_GROUP_ID_X25519_KYBER_512_R2 0x2F26
-#define TLS_PQ_KEM_GROUP_ID_SECP256R1_KYBER_512_R2 0x2F0F
+ * in https://tools.ietf.org/html/draft-stebila-tls-hybrid-design. Values for interoperability are defined in
+ * https://github.com/open-quantum-safe/openssl/blob/OQS-OpenSSL_1_1_1-stable/oqs-template/oqs-kem-info.md */
+#define TLS_PQ_KEM_GROUP_ID_X25519_SIKE_P434_R3 0x2F27
+#define TLS_PQ_KEM_GROUP_ID_SECP256R1_SIKE_P434_R3 0x2F1F
+#define TLS_PQ_KEM_GROUP_ID_X25519_BIKE1_L1_R2 0x2F28
+#define TLS_PQ_KEM_GROUP_ID_SECP256R1_BIKE1_L1_R2 0x2F23
+#define TLS_PQ_KEM_GROUP_ID_X25519_KYBER_512_R2 0x2F26
+#define TLS_PQ_KEM_GROUP_ID_SECP256R1_KYBER_512_R2 0x2F0F
+#define TLS_PQ_KEM_GROUP_ID_X25519_BIKE_L1_R3 0x2F37
+#define TLS_PQ_KEM_GROUP_ID_SECP256R1_BIKE_L1_R3 0x2F38
+#define TLS_PQ_KEM_GROUP_ID_X25519_KYBER_512_R3 0x2F39
+#define TLS_PQ_KEM_GROUP_ID_SECP256R1_KYBER_512_R3 0x2F3A
+
/* From https://tools.ietf.org/html/rfc7507 */
#define TLS_FALLBACK_SCSV 0x56, 0x00
@@ -100,12 +107,14 @@
#define TLS_EXTENSION_SIGNATURE_ALGORITHMS 13
#define TLS_EXTENSION_ALPN 16
#define TLS_EXTENSION_SCT_LIST 18
+#define TLS_EXTENSION_EMS 23
#define TLS_EXTENSION_SESSION_TICKET 35
#define TLS_EXTENSION_PRE_SHARED_KEY 41
#define TLS_EXTENSION_CERT_AUTHORITIES 47
#define TLS_EXTENSION_RENEGOTIATION_INFO 65281
/* TLS 1.3 extensions from https://tools.ietf.org/html/rfc8446#section-4.2 */
+#define TLS_EXTENSION_EARLY_DATA 42
#define TLS_EXTENSION_SUPPORTED_VERSIONS 43
#define TLS_EXTENSION_COOKIE 44
#define TLS_EXTENSION_PSK_KEY_EXCHANGE_MODES 45
@@ -116,33 +125,12 @@
#define TLS_PSK_DHE_KE_MODE 1
/**
- *= https://tools.ietf.org/id/draft-ietf-quic-tls-32.txt#8.2
+ *= https://tools.ietf.org/rfc/rfc9001.txt#8.2
*# enum {
- *# quic_transport_parameters(0xffa5), (65535)
+ *# quic_transport_parameters(0x39), (65535)
*# } ExtensionType;
*/
-#define TLS_QUIC_TRANSPORT_PARAMETERS 0xffa5
-
-/* TLS Signature Algorithms - RFC 5246 7.4.1.4.1 */
-/* https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-16 */
-#define TLS_SIGNATURE_ALGORITHM_ANONYMOUS 0
-#define TLS_SIGNATURE_ALGORITHM_RSA 1
-#define TLS_SIGNATURE_ALGORITHM_DSA 2
-#define TLS_SIGNATURE_ALGORITHM_ECDSA 3
-#define TLS_SIGNATURE_ALGORITHM_PRIVATE 224
-
-#define TLS_SIGNATURE_ALGORITHM_COUNT 4
-
-/* TLS Hash Algorithm - https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 */
-/* https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-18 */
-#define TLS_HASH_ALGORITHM_ANONYMOUS 0
-#define TLS_HASH_ALGORITHM_MD5 1
-#define TLS_HASH_ALGORITHM_SHA1 2
-#define TLS_HASH_ALGORITHM_SHA224 3
-#define TLS_HASH_ALGORITHM_SHA256 4
-#define TLS_HASH_ALGORITHM_SHA384 5
-#define TLS_HASH_ALGORITHM_SHA512 6
-#define TLS_HASH_ALGORITHM_COUNT 7
+#define TLS_QUIC_TRANSPORT_PARAMETERS 0x39
/* TLS SignatureScheme (Backwards compatible with SigHash and SigAlg values above) */
/* Defined here: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-signaturescheme */
@@ -201,6 +189,7 @@
#define TLS_EC_CURVE_SECP_384_R1 24
#define TLS_EC_CURVE_SECP_521_R1 25
#define TLS_EC_CURVE_ECDH_X25519 29
+#define TLS_EC_CURVE_ECDH_X448 30
/* Ethernet maximum transmission unit (MTU)
* MTU is usually associated with the Ethernet protocol,
@@ -214,21 +203,8 @@
#define TCP_HEADER_LENGTH 20
#define TCP_OPTIONS_LENGTH 40
-/* The maximum size of a TLS record is 16389 bytes. This is; 1 byte for content
- * type, 2 bytes for the protocol version, 2 bytes for the length field,
- * and then up to 2^14 for the encrypted+compressed payload data.
- */
-#define S2N_TLS_RECORD_HEADER_LENGTH 5
-#define S2N_TLS_MAXIMUM_FRAGMENT_LENGTH 16384
-/* Maximum TLS record length allows for 2048 octets of compression expansion and padding */
-#define S2N_TLS_MAXIMUM_RECORD_LENGTH (S2N_TLS_MAXIMUM_FRAGMENT_LENGTH + S2N_TLS_RECORD_HEADER_LENGTH + 2048)
#define S2N_TLS_MAX_FRAG_LEN_EXT_NONE 0
-/* TLS1.3 has a max fragment length of 2^14 + 1 byte for the content type */
-#define S2N_TLS13_MAXIMUM_FRAGMENT_LENGTH 16385
-/* Max encryption overhead is 255 for AEAD padding */
-#define S2N_TLS13_MAXIMUM_RECORD_LENGTH (S2N_TLS13_MAXIMUM_FRAGMENT_LENGTH + S2N_TLS_RECORD_HEADER_LENGTH + 255)
-
/* The maximum size of an SSL2 message is 2^14 - 1, as neither of the first two
* bits in the length field are usable. Per;
* http://www-archive.mozilla.org/projects/security/pki/nss/ssl/draft02.html
@@ -258,7 +234,6 @@
*/
#define S2N_LARGE_RECORD_LENGTH S2N_TLS_MAXIMUM_RECORD_LENGTH
#define S2N_LARGE_FRAGMENT_LENGTH S2N_TLS_MAXIMUM_FRAGMENT_LENGTH
-#define S2N_TLS13_LARGE_FRAGMENT_LENGTH S2N_TLS13_MAXIMUM_FRAGMENT_LENGTH
/* Cap dynamic record resize threshold to 8M */
#define S2N_TLS_MAX_RESIZE_THRESHOLD (1024 * 1024 * 8)
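The record-length macros removed from this header above encode a small amount of arithmetic worth keeping in mind; a sketch restating it with the values from the deleted lines (the DEMO_ names are placeholders, not s2n macros):

    /* TLS record = 5-byte header (1 content type + 2 version + 2 length) + fragment + expansion. */
    #define DEMO_TLS_RECORD_HEADER_LENGTH   5
    #define DEMO_TLS_MAX_FRAGMENT_LENGTH    16384   /* 2^14 */
    #define DEMO_TLS_MAX_RECORD_LENGTH      (DEMO_TLS_MAX_FRAGMENT_LENGTH + DEMO_TLS_RECORD_HEADER_LENGTH + 2048)
    /* TLS 1.3 adds 1 byte of inner content type to the fragment and up to 255 bytes of AEAD overhead. */
    #define DEMO_TLS13_MAX_FRAGMENT_LENGTH  16385
    #define DEMO_TLS13_MAX_RECORD_LENGTH    (DEMO_TLS13_MAX_FRAGMENT_LENGTH + DEMO_TLS_RECORD_HEADER_LENGTH + 255)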
diff --git a/contrib/restricted/aws/s2n/tls/s2n_x509_validator.c b/contrib/restricted/aws/s2n/tls/s2n_x509_validator.c
index da2eea8be6..4506878ced 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_x509_validator.c
+++ b/contrib/restricted/aws/s2n/tls/s2n_x509_validator.c
@@ -30,7 +30,7 @@
#include <openssl/asn1.h>
#include <openssl/x509.h>
-#if !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_IS_AWSLC)
+#if !defined(OPENSSL_IS_BORINGSSL)
#include <openssl/ocsp.h>
#endif
@@ -42,13 +42,6 @@
/* Time used by default for nextUpdate if none provided in OCSP: 1 hour since thisUpdate. */
#define DEFAULT_OCSP_NEXT_UPDATE_PERIOD 3600000000000
-typedef enum {
- UNINIT,
- INIT,
- VALIDATED,
- OCSP_VALIDATED,
-} validator_state;
-
uint8_t s2n_x509_ocsp_stapling_supported(void) {
return S2N_OCSP_STAPLING_SUPPORTED;
}
@@ -64,13 +57,13 @@ uint8_t s2n_x509_trust_store_has_certs(struct s2n_x509_trust_store *store) {
int s2n_x509_trust_store_from_system_defaults(struct s2n_x509_trust_store *store) {
if (!store->trust_store) {
store->trust_store = X509_STORE_new();
- notnull_check(store->trust_store);
+ POSIX_ENSURE_REF(store->trust_store);
}
int err_code = X509_STORE_set_default_paths(store->trust_store);
if (!err_code) {
s2n_x509_trust_store_wipe(store);
- S2N_ERROR(S2N_ERR_X509_TRUST_STORE);
+ POSIX_BAIL(S2N_ERR_X509_TRUST_STORE);
}
X509_STORE_set_flags(store->trust_store, X509_VP_FLAG_DEFAULT);
@@ -80,8 +73,8 @@ int s2n_x509_trust_store_from_system_defaults(struct s2n_x509_trust_store *store
int s2n_x509_trust_store_add_pem(struct s2n_x509_trust_store *store, const char *pem)
{
- notnull_check(store);
- notnull_check(pem);
+ POSIX_ENSURE_REF(store);
+ POSIX_ENSURE_REF(pem);
if (!store->trust_store) {
store->trust_store = X509_STORE_new();
@@ -90,21 +83,24 @@ int s2n_x509_trust_store_add_pem(struct s2n_x509_trust_store *store, const char
DEFER_CLEANUP(struct s2n_stuffer pem_in_stuffer = {0}, s2n_stuffer_free);
DEFER_CLEANUP(struct s2n_stuffer der_out_stuffer = {0}, s2n_stuffer_free);
- GUARD(s2n_stuffer_alloc_ro_from_string(&pem_in_stuffer, pem));
- GUARD(s2n_stuffer_growable_alloc(&der_out_stuffer, 2048));
+ POSIX_GUARD(s2n_stuffer_alloc_ro_from_string(&pem_in_stuffer, pem));
+ POSIX_GUARD(s2n_stuffer_growable_alloc(&der_out_stuffer, 2048));
do {
DEFER_CLEANUP(struct s2n_blob next_cert = {0}, s2n_free);
- GUARD(s2n_stuffer_certificate_from_pem(&pem_in_stuffer, &der_out_stuffer));
- GUARD(s2n_alloc(&next_cert, s2n_stuffer_data_available(&der_out_stuffer)));
- GUARD(s2n_stuffer_read(&der_out_stuffer, &next_cert));
+ POSIX_GUARD(s2n_stuffer_certificate_from_pem(&pem_in_stuffer, &der_out_stuffer));
+ POSIX_GUARD(s2n_alloc(&next_cert, s2n_stuffer_data_available(&der_out_stuffer)));
+ POSIX_GUARD(s2n_stuffer_read(&der_out_stuffer, &next_cert));
const uint8_t *data = next_cert.data;
DEFER_CLEANUP(X509 *ca_cert = d2i_X509(NULL, &data, next_cert.size), X509_free_pointer);
S2N_ERROR_IF(ca_cert == NULL, S2N_ERR_DECODE_CERTIFICATE);
- GUARD_OSSL(X509_STORE_add_cert(store->trust_store, ca_cert), S2N_ERR_DECODE_CERTIFICATE);
+ if (!X509_STORE_add_cert(store->trust_store, ca_cert)) {
+ unsigned long error = ERR_get_error();
+ POSIX_ENSURE(ERR_GET_REASON(error) == X509_R_CERT_ALREADY_IN_HASH_TABLE, S2N_ERR_DECODE_CERTIFICATE);
+ }
} while (s2n_stuffer_data_available(&pem_in_stuffer));
return 0;
@@ -113,13 +109,13 @@ int s2n_x509_trust_store_add_pem(struct s2n_x509_trust_store *store, const char
int s2n_x509_trust_store_from_ca_file(struct s2n_x509_trust_store *store, const char *ca_pem_filename, const char *ca_dir) {
if (!store->trust_store) {
store->trust_store = X509_STORE_new();
- notnull_check(store->trust_store);
+ POSIX_ENSURE_REF(store->trust_store);
}
int err_code = X509_STORE_load_locations(store->trust_store, ca_pem_filename, ca_dir);
if (!err_code) {
s2n_x509_trust_store_wipe(store);
- S2N_ERROR(S2N_ERR_X509_TRUST_STORE);
+ POSIX_BAIL(S2N_ERR_X509_TRUST_STORE);
}
 /* It's a likely scenario if this function is called, a self-signed certificate is used, and that it was generated
@@ -141,7 +137,7 @@ void s2n_x509_trust_store_wipe(struct s2n_x509_trust_store *store) {
}
int s2n_x509_validator_init_no_x509_validation(struct s2n_x509_validator *validator) {
- notnull_check(validator);
+ POSIX_ENSURE_REF(validator);
validator->trust_store = NULL;
validator->store_ctx = NULL;
validator->skip_cert_validation = 1;
@@ -154,7 +150,7 @@ int s2n_x509_validator_init_no_x509_validation(struct s2n_x509_validator *valida
}
int s2n_x509_validator_init(struct s2n_x509_validator *validator, struct s2n_x509_trust_store *trust_store, uint8_t check_ocsp) {
- notnull_check(trust_store);
+ POSIX_ENSURE_REF(trust_store);
validator->trust_store = trust_store;
validator->skip_cert_validation = 0;
validator->check_stapled_ocsp = check_ocsp;
@@ -162,7 +158,7 @@ int s2n_x509_validator_init(struct s2n_x509_validator *validator, struct s2n_x50
validator->store_ctx = NULL;
if (validator->trust_store->trust_store) {
validator->store_ctx = X509_STORE_CTX_new();
- notnull_check(validator->store_ctx);
+ POSIX_ENSURE_REF(validator->store_ctx);
}
validator->cert_chain_from_wire = sk_X509_new_null();
validator->state = INIT;
@@ -190,7 +186,7 @@ void s2n_x509_validator_wipe(struct s2n_x509_validator *validator) {
}
int s2n_x509_validator_set_max_chain_depth(struct s2n_x509_validator *validator, uint16_t max_depth) {
- notnull_check(validator);
+ POSIX_ENSURE_REF(validator);
S2N_ERROR_IF(max_depth == 0, S2N_ERR_INVALID_ARGUMENT);
validator->max_chain_depth = max_depth;
@@ -265,7 +261,7 @@ static uint8_t s2n_verify_host_information(struct s2n_x509_validator *validator,
if (common_name) {
char peer_cn[255];
static size_t peer_cn_size = sizeof(peer_cn);
- memset_check(&peer_cn, 0, peer_cn_size);
+ POSIX_CHECKED_MEMSET(&peer_cn, 0, peer_cn_size);
/* X520CommonName allows the following ANSI string types per RFC 5280 Appendix A.1 */
if (ASN1_STRING_type(common_name) == V_ASN1_TELETEXSTRING ||
@@ -276,8 +272,8 @@ static uint8_t s2n_verify_host_information(struct s2n_x509_validator *validator,
size_t len = (size_t) ASN1_STRING_length(common_name);
- lte_check(len, sizeof(peer_cn) - 1);
- memcpy_check(peer_cn, ASN1_STRING_data(common_name), len);
+ POSIX_ENSURE_LTE(len, sizeof(peer_cn) - 1);
+ POSIX_CHECKED_MEMCPY(peer_cn, ASN1_STRING_data(common_name), len);
verified = conn->verify_host_fn(peer_cn, len, conn->data_for_verify_host);
}
}
@@ -291,7 +287,7 @@ static uint8_t s2n_verify_host_information(struct s2n_x509_validator *validator,
s2n_cert_validation_code s2n_x509_validator_validate_cert_chain(struct s2n_x509_validator *validator, struct s2n_connection *conn,
uint8_t *cert_chain_in, uint32_t cert_chain_len, s2n_pkey_type *pkey_type, struct s2n_pkey *public_key_out) {
S2N_ERROR_IF(!validator->skip_cert_validation && !s2n_x509_trust_store_has_certs(validator->trust_store), S2N_ERR_CERT_UNTRUSTED);
- S2N_ERROR_IF(validator->state != INIT, S2N_ERR_INVALID_STATE);
+ S2N_ERROR_IF(validator->state != INIT, S2N_ERR_INVALID_CERT_STATE);
struct s2n_blob cert_chain_blob = {.data = cert_chain_in, .size = cert_chain_len};
DEFER_CLEANUP(struct s2n_stuffer cert_chain_in_stuffer = {0}, s2n_stuffer_free);
@@ -315,7 +311,7 @@ s2n_cert_validation_code s2n_x509_validator_validate_cert_chain(struct s2n_x509_
struct s2n_blob asn1cert = {0};
asn1cert.size = certificate_size;
asn1cert.data = s2n_stuffer_raw_read(&cert_chain_in_stuffer, certificate_size);
- notnull_check(asn1cert.data);
+ POSIX_ENSURE_REF(asn1cert.data);
const uint8_t *data = asn1cert.data;
@@ -326,11 +322,11 @@ s2n_cert_validation_code s2n_x509_validator_validate_cert_chain(struct s2n_x509_
/* add the cert to the chain. */
if (!sk_X509_push(validator->cert_chain_from_wire, server_cert)) {
X509_free(server_cert);
- S2N_ERROR(S2N_ERR_CERT_UNTRUSTED);
+ POSIX_BAIL(S2N_ERR_CERT_UNTRUSTED);
}
if (!validator->skip_cert_validation) {
- GUARD_AS_POSIX(s2n_validate_certificate_signature(conn, server_cert));
+ POSIX_GUARD_RESULT(s2n_validate_certificate_signature(conn, server_cert));
}
/* Pull the public key from the first certificate */
@@ -341,7 +337,7 @@ s2n_cert_validation_code s2n_x509_validator_validate_cert_chain(struct s2n_x509_
/* certificate extensions is a field in TLS 1.3 - https://tools.ietf.org/html/rfc8446#section-4.4.2 */
if (conn->actual_protocol_version >= S2N_TLS13) {
s2n_parsed_extensions_list parsed_extensions_list = { 0 };
- GUARD(s2n_extension_list_parse(&cert_chain_in_stuffer, &parsed_extensions_list));
+ POSIX_GUARD(s2n_extension_list_parse(&cert_chain_in_stuffer, &parsed_extensions_list));
/* RFC 8446: if an extension applies to the entire chain, it SHOULD be included in the first CertificateEntry */
if (sk_X509_num(validator->cert_chain_from_wire) == 1) {
@@ -379,7 +375,7 @@ s2n_cert_validation_code s2n_x509_validator_validate_cert_chain(struct s2n_x509_
}
if (conn->actual_protocol_version >= S2N_TLS13) {
- GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_CERTIFICATE, conn, &first_certificate_extensions));
+ POSIX_GUARD(s2n_extension_list_process(S2N_EXTENSION_LIST_CERTIFICATE, conn, &first_certificate_extensions));
}
*public_key_out = public_key;
@@ -398,7 +394,7 @@ s2n_cert_validation_code s2n_x509_validator_validate_cert_stapled_ocsp_response(
return S2N_CERT_OK;
}
- S2N_ERROR_IF(validator->state != VALIDATED, S2N_ERR_INVALID_STATE);
+ S2N_ERROR_IF(validator->state != VALIDATED, S2N_ERR_INVALID_CERT_STATE);
#if !S2N_OCSP_STAPLING_SUPPORTED
/* Default to safety */
@@ -467,13 +463,15 @@ s2n_cert_validation_code s2n_x509_validator_validate_cert_stapled_ocsp_response(
}
/* Important: this checks that the stapled ocsp response CAN be verified, not that it has been verified. */
- const int ocsp_verify_err = OCSP_basic_verify(basic_response, cert_chain, validator->trust_store->trust_store, 0);
- /* do the crypto checks on the response.*/
- if (!ocsp_verify_err) {
- ret_val = S2N_CERT_ERR_UNTRUSTED;
+ const int ocsp_verify_res = OCSP_basic_verify(basic_response, cert_chain, validator->trust_store->trust_store, 0);
+
+ /* OCSP_basic_verify() returns 1 on success, 0 on error, or -1 on fatal error such as malloc failure. */
+ if (ocsp_verify_res != _OSSL_SUCCESS) {
+ ret_val = ocsp_verify_res == 0 ? S2N_CERT_ERR_UNTRUSTED : S2N_CERT_ERR_INTERNAL_ERROR;
goto clean_up;
}
+ /* do the crypto checks on the response.*/
int status = 0;
int reason = 0;
@@ -557,42 +555,42 @@ s2n_cert_validation_code s2n_x509_validator_validate_cert_stapled_ocsp_response(
S2N_RESULT s2n_validate_certificate_signature(struct s2n_connection *conn, X509 *x509_cert)
{
- ENSURE_REF(conn);
- ENSURE_REF(x509_cert);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(x509_cert);
const struct s2n_security_policy *security_policy;
- GUARD_AS_RESULT(s2n_connection_get_security_policy(conn, &security_policy));
+ RESULT_GUARD_POSIX(s2n_connection_get_security_policy(conn, &security_policy));
if (security_policy->certificate_signature_preferences == NULL) {
return S2N_RESULT_OK;
}
X509_NAME *issuer_name = X509_get_issuer_name(x509_cert);
- ENSURE_REF(issuer_name);
+ RESULT_ENSURE_REF(issuer_name);
X509_NAME *subject_name = X509_get_subject_name(x509_cert);
- ENSURE_REF(subject_name);
+ RESULT_ENSURE_REF(subject_name);
/* Do not validate any self-signed certificates */
if (X509_NAME_cmp(issuer_name, subject_name) == 0) {
return S2N_RESULT_OK;
}
- GUARD_RESULT(s2n_validate_sig_scheme_supported(conn, x509_cert, security_policy->certificate_signature_preferences));
+ RESULT_GUARD(s2n_validate_sig_scheme_supported(conn, x509_cert, security_policy->certificate_signature_preferences));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_validate_sig_scheme_supported(struct s2n_connection *conn, X509 *x509_cert, const struct s2n_signature_preferences *cert_sig_preferences)
{
- ENSURE_REF(conn);
- ENSURE_REF(x509_cert);
- ENSURE_REF(cert_sig_preferences);
+ RESULT_ENSURE_REF(conn);
+ RESULT_ENSURE_REF(x509_cert);
+ RESULT_ENSURE_REF(cert_sig_preferences);
int nid = 0;
#if defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER < 0x02070000f)
- ENSURE_REF(x509_cert->sig_alg);
+ RESULT_ENSURE_REF(x509_cert->sig_alg);
nid = OBJ_obj2nid(x509_cert->sig_alg->algorithm);
#else
nid = X509_get_signature_nid(x509_cert);
@@ -602,12 +600,17 @@ S2N_RESULT s2n_validate_sig_scheme_supported(struct s2n_connection *conn, X509 *
if (cert_sig_preferences->signature_schemes[i]->libcrypto_nid == nid) {
/* SHA-1 algorithms are not supported in certificate signatures in TLS1.3 */
- ENSURE(!(conn->actual_protocol_version >= S2N_TLS13 &&
+ RESULT_ENSURE(!(conn->actual_protocol_version >= S2N_TLS13 &&
cert_sig_preferences->signature_schemes[i]->hash_alg == S2N_HASH_SHA1), S2N_ERR_CERT_UNTRUSTED);
return S2N_RESULT_OK;
}
}
- BAIL(S2N_ERR_CERT_UNTRUSTED);
+ RESULT_BAIL(S2N_ERR_CERT_UNTRUSTED);
+}
+
+bool s2n_x509_validator_is_cert_chain_validated(const struct s2n_x509_validator *validator)
+{
+ return validator && (validator->state == VALIDATED || validator->state == OCSP_VALIDATED);
}
diff --git a/contrib/restricted/aws/s2n/tls/s2n_x509_validator.h b/contrib/restricted/aws/s2n/tls/s2n_x509_validator.h
index 9e57bb4d34..b35d535141 100644
--- a/contrib/restricted/aws/s2n/tls/s2n_x509_validator.h
+++ b/contrib/restricted/aws/s2n/tls/s2n_x509_validator.h
@@ -21,13 +21,13 @@
#include <openssl/x509v3.h>
-/* one day, BoringSSL/AWS-LC, may add ocsp stapling support. Let's future proof this a bit by grabbing a definition
+/* one day, BoringSSL may add ocsp stapling support. Let's future proof this a bit by grabbing a definition
* that would have to be there when they add support */
-#if (defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)) && !defined(OCSP_RESPONSE_STATUS_SUCCESSFUL)
+#if defined(OPENSSL_IS_BORINGSSL) && !defined(OCSP_RESPONSE_STATUS_SUCCESSFUL)
#define S2N_OCSP_STAPLING_SUPPORTED 0
#else
#define S2N_OCSP_STAPLING_SUPPORTED 1
-#endif /* (defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)) && !defined(OCSP_RESPONSE_STATUS_SUCCESSFUL) */
+#endif /* defined(OPENSSL_IS_BORINGSSL) && !defined(OCSP_RESPONSE_STATUS_SUCCESSFUL) */
typedef enum {
S2N_CERT_OK = 0,
@@ -36,9 +36,17 @@ typedef enum {
S2N_CERT_ERR_EXPIRED = -3,
S2N_CERT_ERR_TYPE_UNSUPPORTED = -4,
S2N_CERT_ERR_INVALID = -5,
- S2N_CERT_ERR_MAX_CHAIN_DEPTH_EXCEEDED = -6
+ S2N_CERT_ERR_MAX_CHAIN_DEPTH_EXCEEDED = -6,
+ S2N_CERT_ERR_INTERNAL_ERROR = -7
} s2n_cert_validation_code;
+typedef enum {
+ UNINIT,
+ INIT,
+ VALIDATED,
+ OCSP_VALIDATED,
+} validator_state;
+
/** Return TRUE for trusted, FALSE for untrusted **/
typedef uint8_t (*verify_host) (const char *host_name, size_t host_name_len, void *data);
struct s2n_connection;
@@ -125,6 +133,12 @@ s2n_cert_validation_code s2n_x509_validator_validate_cert_stapled_ocsp_response(
const uint8_t *ocsp_response, uint32_t size);
/**
+ * Checks whether the peer's certificate chain has been received and validated.
+ * Should be verified before any use of the peer's certificate data.
+ */
+bool s2n_x509_validator_is_cert_chain_validated(const struct s2n_x509_validator *validator);
+
+/**
* Validates that each certificate in a peer's cert chain contains only signature algorithms in a security policy's
* certificate_signatures_preference list.
*/
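For orientation, here is a minimal sketch of how library code might consult the new validator state before touching peer certificate data. The connection field name (conn->x509_validator) is an assumption for illustration and is not part of this hunk.

/* Hypothetical sketch: gate access to peer certificate data on the validator state. */
static S2N_RESULT s2n_example_use_peer_cert(struct s2n_connection *conn)
{
    RESULT_ENSURE_REF(conn);
    /* Only proceed once the chain reached VALIDATED or OCSP_VALIDATED. */
    RESULT_ENSURE(s2n_x509_validator_is_cert_chain_validated(&conn->x509_validator),
            S2N_ERR_CERT_UNTRUSTED);
    /* ... safe to read the peer's certificate chain here ... */
    return S2N_RESULT_OK;
}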
diff --git a/contrib/restricted/aws/s2n/utils/s2n_array.c b/contrib/restricted/aws/s2n/utils/s2n_array.c
index ecdf9a2cf2..0ac2207e9a 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_array.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_array.c
@@ -23,35 +23,36 @@
S2N_RESULT s2n_array_validate(const struct s2n_array *array)
{
uint32_t mem_size = 0;
- ENSURE_REF(array);
- GUARD_RESULT(s2n_blob_validate(&array->mem));
- ENSURE_NE(array->element_size, 0);
- GUARD_AS_RESULT(s2n_mul_overflow(array->len, array->element_size, &mem_size));
- ENSURE_GTE(array->mem.size, mem_size);
+ RESULT_ENSURE_REF(array);
+ RESULT_GUARD(s2n_blob_validate(&array->mem));
+ RESULT_ENSURE_NE(array->element_size, 0);
+ RESULT_GUARD_POSIX(s2n_mul_overflow(array->len, array->element_size, &mem_size));
+ RESULT_ENSURE_GTE(array->mem.size, mem_size);
+ RESULT_ENSURE(S2N_IMPLIES(array->mem.size, array->mem.growable), S2N_ERR_SAFETY);
return S2N_RESULT_OK;
}
static S2N_RESULT s2n_array_enlarge(struct s2n_array *array, uint32_t capacity)
{
- ENSURE_REF(array);
+ RESULT_ENSURE_REF(array);
/* Acquire the memory */
uint32_t mem_needed;
- GUARD_AS_RESULT(s2n_mul_overflow(array->element_size, capacity, &mem_needed));
- GUARD_AS_RESULT(s2n_realloc(&array->mem, mem_needed));
+ RESULT_GUARD_POSIX(s2n_mul_overflow(array->element_size, capacity, &mem_needed));
+ RESULT_GUARD_POSIX(s2n_realloc(&array->mem, mem_needed));
/* Zero the extended part */
uint32_t array_elements_size;
- GUARD_AS_RESULT(s2n_mul_overflow(array->element_size, array->len, &array_elements_size));
- CHECKED_MEMSET(array->mem.data + array_elements_size, 0, array->mem.size - array_elements_size);
- GUARD_RESULT(s2n_array_validate(array));
+ RESULT_GUARD_POSIX(s2n_mul_overflow(array->element_size, array->len, &array_elements_size));
+ RESULT_CHECKED_MEMSET(array->mem.data + array_elements_size, 0, array->mem.size - array_elements_size);
+ RESULT_GUARD(s2n_array_validate(array));
return S2N_RESULT_OK;
}
struct s2n_array *s2n_array_new(uint32_t element_size)
{
struct s2n_blob mem = {0};
- GUARD_PTR(s2n_alloc(&mem, sizeof(struct s2n_array)));
+ PTR_GUARD_POSIX(s2n_alloc(&mem, sizeof(struct s2n_array)));
struct s2n_array *array = (void *) mem.data;
@@ -59,7 +60,7 @@ struct s2n_array *s2n_array_new(uint32_t element_size)
if (s2n_result_is_error(s2n_array_enlarge(array, S2N_INITIAL_ARRAY_SIZE))) {
/* Avoid memory leak if allocation fails */
- GUARD_PTR(s2n_free(&mem));
+ PTR_GUARD_POSIX(s2n_free(&mem));
return NULL;
}
return array;
@@ -67,87 +68,87 @@ struct s2n_array *s2n_array_new(uint32_t element_size)
S2N_RESULT s2n_array_init(struct s2n_array *array, uint32_t element_size)
{
- ENSURE_REF(array);
+ RESULT_ENSURE_REF(array);
*array = (struct s2n_array){.element_size = element_size};
- GUARD_RESULT(s2n_array_validate(array));
+ RESULT_GUARD(s2n_array_validate(array));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_array_pushback(struct s2n_array *array, void **element)
{
- GUARD_RESULT(s2n_array_validate(array));
- ENSURE_REF(element);
+ RESULT_GUARD(s2n_array_validate(array));
+ RESULT_ENSURE_REF(element);
return s2n_array_insert(array, array->len, element);
}
-S2N_RESULT s2n_array_get(struct s2n_array *array, uint32_t index, void **element)
+S2N_RESULT s2n_array_get(struct s2n_array *array, uint32_t idx, void **element)
{
- GUARD_RESULT(s2n_array_validate(array));
- ENSURE_REF(element);
- ENSURE(index < array->len, S2N_ERR_ARRAY_INDEX_OOB);
- *element = array->mem.data + (array->element_size * index);
+ RESULT_GUARD(s2n_array_validate(array));
+ RESULT_ENSURE_REF(element);
+ RESULT_ENSURE(idx < array->len, S2N_ERR_ARRAY_INDEX_OOB);
+ *element = array->mem.data + (array->element_size * idx);
return S2N_RESULT_OK;
}
-S2N_RESULT s2n_array_insert_and_copy(struct s2n_array *array, uint32_t index, void* element)
+S2N_RESULT s2n_array_insert_and_copy(struct s2n_array *array, uint32_t idx, void* element)
{
void* insert_location = NULL;
- GUARD_RESULT(s2n_array_insert(array, index, &insert_location));
- CHECKED_MEMCPY(insert_location, element, array->element_size);
+ RESULT_GUARD(s2n_array_insert(array, idx, &insert_location));
+ RESULT_CHECKED_MEMCPY(insert_location, element, array->element_size);
return S2N_RESULT_OK;
}
-S2N_RESULT s2n_array_insert(struct s2n_array *array, uint32_t index, void **element)
+S2N_RESULT s2n_array_insert(struct s2n_array *array, uint32_t idx, void **element)
{
- GUARD_RESULT(s2n_array_validate(array));
- ENSURE_REF(element);
+ RESULT_GUARD(s2n_array_validate(array));
+ RESULT_ENSURE_REF(element);
/* index == len is ok since we're about to add one element */
- ENSURE(index <= array->len, S2N_ERR_ARRAY_INDEX_OOB);
+ RESULT_ENSURE(idx <= array->len, S2N_ERR_ARRAY_INDEX_OOB);
/* We are about to add one more element to the array. Add capacity if necessary */
uint32_t current_capacity = 0;
- GUARD_RESULT(s2n_array_capacity(array, &current_capacity));
+ RESULT_GUARD(s2n_array_capacity(array, &current_capacity));
if (array->len >= current_capacity) {
/* Enlarge the array */
uint32_t new_capacity = 0;
- GUARD_AS_RESULT(s2n_mul_overflow(current_capacity, 2, &new_capacity));
+ RESULT_GUARD_POSIX(s2n_mul_overflow(current_capacity, 2, &new_capacity));
new_capacity = MAX(new_capacity, S2N_INITIAL_ARRAY_SIZE);
- GUARD_RESULT(s2n_array_enlarge(array, new_capacity));
+ RESULT_GUARD(s2n_array_enlarge(array, new_capacity));
}
/* If we are adding at an existing index, slide everything down. */
- if (index < array->len) {
- memmove(array->mem.data + array->element_size * (index + 1),
- array->mem.data + array->element_size * index,
- (array->len - index) * array->element_size);
+ if (idx < array->len) {
+ memmove(array->mem.data + array->element_size * (idx + 1),
+ array->mem.data + array->element_size * idx,
+ (array->len - idx) * array->element_size);
}
- *element = array->mem.data + array->element_size * index;
+ *element = array->mem.data + array->element_size * idx;
array->len++;
- GUARD_RESULT(s2n_array_validate(array));
+ RESULT_GUARD(s2n_array_validate(array));
return S2N_RESULT_OK;
}
-S2N_RESULT s2n_array_remove(struct s2n_array *array, uint32_t index)
+S2N_RESULT s2n_array_remove(struct s2n_array *array, uint32_t idx)
{
- GUARD_RESULT(s2n_array_validate(array));
- ENSURE(index < array->len, S2N_ERR_ARRAY_INDEX_OOB);
+ RESULT_GUARD(s2n_array_validate(array));
+ RESULT_ENSURE(idx < array->len, S2N_ERR_ARRAY_INDEX_OOB);
/* If the removed element is the last one, no need to move anything.
* Otherwise, shift everything down */
- if (index != array->len - 1) {
- memmove(array->mem.data + array->element_size * index,
- array->mem.data + array->element_size * (index + 1),
- (array->len - index - 1) * array->element_size);
+ if (idx != array->len - 1) {
+ memmove(array->mem.data + array->element_size * idx,
+ array->mem.data + array->element_size * (idx + 1),
+ (array->len - idx - 1) * array->element_size);
}
array->len--;
/* After shifting, zero the last element */
- CHECKED_MEMSET(array->mem.data + array->element_size * array->len,
+ RESULT_CHECKED_MEMSET(array->mem.data + array->element_size * array->len,
0,
array->element_size);
@@ -156,8 +157,8 @@ S2N_RESULT s2n_array_remove(struct s2n_array *array, uint32_t index)
S2N_RESULT s2n_array_num_elements(struct s2n_array *array, uint32_t *len)
{
- GUARD_RESULT(s2n_array_validate(array));
- ENSURE_MUT(len);
+ RESULT_GUARD(s2n_array_validate(array));
+ RESULT_ENSURE_MUT(len);
*len = array->len;
@@ -166,8 +167,8 @@ S2N_RESULT s2n_array_num_elements(struct s2n_array *array, uint32_t *len)
S2N_RESULT s2n_array_capacity(struct s2n_array *array, uint32_t *capacity)
{
- GUARD_RESULT(s2n_array_validate(array));
- ENSURE_MUT(capacity);
+ RESULT_GUARD(s2n_array_validate(array));
+ RESULT_ENSURE_MUT(capacity);
*capacity = array->mem.size / array->element_size;
@@ -176,21 +177,21 @@ S2N_RESULT s2n_array_capacity(struct s2n_array *array, uint32_t *capacity)
S2N_RESULT s2n_array_free_p(struct s2n_array **parray)
{
- ENSURE_REF(parray);
+ RESULT_ENSURE_REF(parray);
struct s2n_array *array = *parray;
- ENSURE_REF(array);
+ RESULT_ENSURE_REF(array);
/* Free the elements */
- GUARD_AS_RESULT(s2n_free(&array->mem));
+ RESULT_GUARD_POSIX(s2n_free(&array->mem));
/* And finally the array */
- GUARD_AS_RESULT(s2n_free_object((uint8_t **)parray, sizeof(struct s2n_array)));
+ RESULT_GUARD_POSIX(s2n_free_object((uint8_t **)parray, sizeof(struct s2n_array)));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_array_free(struct s2n_array *array)
{
- ENSURE_REF(array);
+ RESULT_ENSURE_REF(array);
return s2n_array_free_p(&array);
}
diff --git a/contrib/restricted/aws/s2n/utils/s2n_array.h b/contrib/restricted/aws/s2n/utils/s2n_array.h
index 3ae0996948..3e846ce4fd 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_array.h
+++ b/contrib/restricted/aws/s2n/utils/s2n_array.h
@@ -14,7 +14,7 @@
*/
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include "utils/s2n_blob.h"
#include "utils/s2n_result.h"
@@ -35,11 +35,11 @@ extern S2N_RESULT s2n_array_validate(const struct s2n_array *array);
extern struct s2n_array *s2n_array_new(uint32_t element_size);
extern S2N_RESULT s2n_array_init(struct s2n_array *array, uint32_t element_size);
extern S2N_RESULT s2n_array_pushback(struct s2n_array *array, void **element);
-extern S2N_RESULT s2n_array_get(struct s2n_array *array, uint32_t index, void **element);
-extern S2N_RESULT s2n_array_insert(struct s2n_array *array, uint32_t index, void **element);
-extern S2N_RESULT s2n_array_insert_and_copy(struct s2n_array *array, uint32_t index, void *element);
+extern S2N_RESULT s2n_array_get(struct s2n_array *array, uint32_t idx, void **element);
+extern S2N_RESULT s2n_array_insert(struct s2n_array *array, uint32_t idx, void **element);
+extern S2N_RESULT s2n_array_insert_and_copy(struct s2n_array *array, uint32_t idx, void *element);
extern S2N_RESULT s2n_array_num_elements(struct s2n_array *array, uint32_t *len);
extern S2N_RESULT s2n_array_capacity(struct s2n_array *array, uint32_t *capacity);
-extern S2N_RESULT s2n_array_remove(struct s2n_array *array, uint32_t index);
+extern S2N_RESULT s2n_array_remove(struct s2n_array *array, uint32_t idx);
extern S2N_RESULT s2n_array_free_p(struct s2n_array **parray);
extern S2N_RESULT s2n_array_free(struct s2n_array *array);
diff --git a/contrib/restricted/aws/s2n/utils/s2n_asn1_time.c b/contrib/restricted/aws/s2n/utils/s2n_asn1_time.c
index 01b7acceb1..7a4396da82 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_asn1_time.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_asn1_time.c
@@ -46,7 +46,9 @@ typedef enum parser_state {
} parser_state;
static inline long get_gmt_offset(struct tm *t) {
-#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__ANDROID__) || defined(ANDROID) || defined(__APPLE__) && defined(__MACH__)
+
+/* See: https://sourceware.org/git/?p=glibc.git;a=blob;f=include/features.h;h=ba272078cf2263ec88e039fda7524c136a4a7953;hb=HEAD */
+#if defined(__USE_MISC) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__ANDROID__) || defined(ANDROID) || defined(__APPLE__) && defined(__MACH__)
return t->tm_gmtoff;
#else
return t->__tm_gmtoff;
@@ -62,7 +64,7 @@ static inline void get_current_timesettings(long *gmt_offset, int *is_dst) {
*is_dst = time_ptr.tm_isdst;
}
-#define PARSE_DIGIT(c, d) do { ENSURE(isdigit(c), S2N_ERR_SAFETY); d = c - '0'; } while(0)
+#define PARSE_DIGIT(c, d) do { RESULT_ENSURE(isdigit(c), S2N_ERR_SAFETY); d = c - '0'; } while(0)
/* this is just a standard state machine for ASN1 date format... nothing special.
* just do a character at a time and change the state per character encountered.
@@ -255,15 +257,15 @@ S2N_RESULT s2n_asn1_time_to_nano_since_epoch_ticks(const char *asn1_time, uint32
while (state < FINISHED && current_pos < str_len) {
char current_char = asn1_time[current_pos];
- ENSURE_OK(process_state(&state, current_char, &args), S2N_ERR_INVALID_ARGUMENT);
+ RESULT_ENSURE_OK(process_state(&state, current_char, &args), S2N_ERR_INVALID_ARGUMENT);
current_pos++;
}
/* state on subsecond means no timezone info was found and we assume local time */
- ENSURE(state == FINISHED || state == ON_SUBSECOND, S2N_ERR_INVALID_ARGUMENT);
+ RESULT_ENSURE(state == FINISHED || state == ON_SUBSECOND, S2N_ERR_INVALID_ARGUMENT);
time_t clock_data = mktime(&args.time);
- ENSURE_GTE(clock_data, 0);
+ RESULT_ENSURE_GTE(clock_data, 0);
/* ASN1 + and - is in format HHMM. We need to convert it to seconds for the adjustment */
long gmt_offset = (args.offset_hours * 3600) + (args.offset_minutes * 60);
@@ -280,10 +282,9 @@ S2N_RESULT s2n_asn1_time_to_nano_since_epoch_ticks(const char *asn1_time, uint32
gmt_offset -= args.time.tm_isdst != is_dst ? (args.time.tm_isdst - is_dst) * 3600 : 0;
}
- ENSURE_GTE(clock_data, gmt_offset);
+ RESULT_ENSURE_GTE(clock_data, gmt_offset);
/* convert to nanoseconds and add the timezone offset. */
*ticks = ((uint64_t) clock_data - gmt_offset) * 1000000000;
return S2N_RESULT_OK;
}
-
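For context, a sketch of how the parser is driven; the full signature is assumed to be (const char *asn1_time, uint32_t len, uint64_t *ticks) based on the partial declaration above, and the snippet is a fragment rather than a complete function.

/* Sketch: "20220510191603Z" is GeneralizedTime in UTC, i.e. 2022-05-10 19:16:03. */
const char *ts = "20220510191603Z";
uint64_t ticks = 0;
RESULT_GUARD(s2n_asn1_time_to_nano_since_epoch_ticks(ts, (uint32_t) strlen(ts), &ticks));
/* ticks now holds seconds since the epoch scaled by 1000000000, adjusted by the parsed offset. */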
diff --git a/contrib/restricted/aws/s2n/utils/s2n_blob.c b/contrib/restricted/aws/s2n/utils/s2n_blob.c
index 649fb55c0d..f76e7208cb 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_blob.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_blob.c
@@ -22,61 +22,61 @@
#include "utils/s2n_safety.h"
#include "utils/s2n_blob.h"
-#include <s2n.h>
+#include "api/s2n.h"
S2N_RESULT s2n_blob_validate(const struct s2n_blob* b)
{
- ENSURE_REF(b);
- DEBUG_ENSURE(S2N_IMPLIES(b->data == NULL, b->size == 0), S2N_ERR_SAFETY);
- DEBUG_ENSURE(S2N_IMPLIES(b->data == NULL, b->allocated == 0), S2N_ERR_SAFETY);
- DEBUG_ENSURE(S2N_IMPLIES(b->growable == 0, b->allocated == 0), S2N_ERR_SAFETY);
- DEBUG_ENSURE(S2N_IMPLIES(b->growable != 0, b->size <= b->allocated), S2N_ERR_SAFETY);
- DEBUG_ENSURE(S2N_MEM_IS_READABLE(b->data, b->allocated), S2N_ERR_SAFETY);
- DEBUG_ENSURE(S2N_MEM_IS_READABLE(b->data, b->size), S2N_ERR_SAFETY);
+ RESULT_ENSURE_REF(b);
+ RESULT_DEBUG_ENSURE(S2N_IMPLIES(b->data == NULL, b->size == 0), S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(S2N_IMPLIES(b->data == NULL, b->allocated == 0), S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(S2N_IMPLIES(b->growable == 0, b->allocated == 0), S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(S2N_IMPLIES(b->growable != 0, b->size <= b->allocated), S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(S2N_MEM_IS_READABLE(b->data, b->allocated), S2N_ERR_SAFETY);
+ RESULT_DEBUG_ENSURE(S2N_MEM_IS_READABLE(b->data, b->size), S2N_ERR_SAFETY);
return S2N_RESULT_OK;
}
int s2n_blob_init(struct s2n_blob *b, uint8_t * data, uint32_t size)
{
- ENSURE_POSIX_REF(b);
- ENSURE_POSIX(S2N_MEM_IS_READABLE(data, size), S2N_ERR_SAFETY);
+ POSIX_ENSURE_REF(b);
+ POSIX_ENSURE(S2N_MEM_IS_READABLE(data, size), S2N_ERR_SAFETY);
*b = (struct s2n_blob) {.data = data, .size = size, .allocated = 0, .growable = 0};
- POSTCONDITION_POSIX(s2n_blob_validate(b));
+ POSIX_POSTCONDITION(s2n_blob_validate(b));
return S2N_SUCCESS;
}
int s2n_blob_zero(struct s2n_blob *b)
{
- PRECONDITION_POSIX(s2n_blob_validate(b));
- memset_check(b->data, 0, MAX(b->allocated, b->size));
- POSTCONDITION_POSIX(s2n_blob_validate(b));
+ POSIX_PRECONDITION(s2n_blob_validate(b));
+ POSIX_CHECKED_MEMSET(b->data, 0, MAX(b->allocated, b->size));
+ POSIX_POSTCONDITION(s2n_blob_validate(b));
return S2N_SUCCESS;
}
int s2n_blob_slice(const struct s2n_blob *b, struct s2n_blob *slice, uint32_t offset, uint32_t size)
{
- PRECONDITION_POSIX(s2n_blob_validate(b));
- PRECONDITION_POSIX(s2n_blob_validate(slice));
+ POSIX_PRECONDITION(s2n_blob_validate(b));
+ POSIX_PRECONDITION(s2n_blob_validate(slice));
uint32_t slice_size = 0;
- GUARD(s2n_add_overflow(offset, size, &slice_size));
- ENSURE_POSIX(b->size >= slice_size, S2N_ERR_SIZE_MISMATCH);
- slice->data = b->data + offset;
+ POSIX_GUARD(s2n_add_overflow(offset, size, &slice_size));
+ POSIX_ENSURE(b->size >= slice_size, S2N_ERR_SIZE_MISMATCH);
+ slice->data = (b->data) ? (b->data + offset) : NULL;
slice->size = size;
slice->growable = 0;
slice->allocated = 0;
- POSTCONDITION_POSIX(s2n_blob_validate(slice));
+ POSIX_POSTCONDITION(s2n_blob_validate(slice));
return S2N_SUCCESS;
}
int s2n_blob_char_to_lower(struct s2n_blob *b)
{
- PRECONDITION_POSIX(s2n_blob_validate(b));
+ POSIX_PRECONDITION(s2n_blob_validate(b));
for (size_t i = 0; i < b->size; i++) {
b->data[i] = tolower(b->data[i]);
}
- POSTCONDITION_POSIX(s2n_blob_validate(b));
+ POSIX_POSTCONDITION(s2n_blob_validate(b));
return S2N_SUCCESS;
}
@@ -105,22 +105,31 @@ static const uint8_t hex_inverse[256] = {
* string needs to be valid hex and the blob needs to be large enough */
int s2n_hex_string_to_bytes(const uint8_t *str, struct s2n_blob *blob)
{
- ENSURE_POSIX_REF(str);
- PRECONDITION_POSIX(s2n_blob_validate(blob));
- uint32_t len = strlen((const char*)str);
- /* protects against overflows */
- gte_check(blob->size, len / 2);
- S2N_ERROR_IF(len % 2 != 0, S2N_ERR_INVALID_HEX);
-
- for (size_t i = 0; i < len; i += 2) {
- uint8_t high_nibble = hex_inverse[str[i]];
- S2N_ERROR_IF(high_nibble == 255, S2N_ERR_INVALID_HEX);
-
- uint8_t low_nibble = hex_inverse[str[i + 1]];
- S2N_ERROR_IF(low_nibble == 255, S2N_ERR_INVALID_HEX);
- blob->data[i / 2] = high_nibble << 4 | low_nibble;
+ POSIX_ENSURE_REF(str);
+ POSIX_PRECONDITION(s2n_blob_validate(blob));
+ uint32_t len_with_spaces = strlen((const char*)str);
+
+ size_t i = 0, j = 0;
+ while (j < len_with_spaces) {
+ if (str[j] == ' ') {
+ j++;
+ continue;
+ }
+
+ uint8_t high_nibble = hex_inverse[str[j]];
+ POSIX_ENSURE(high_nibble != 255, S2N_ERR_INVALID_HEX);
+
+ uint8_t low_nibble = hex_inverse[str[j + 1]];
+ POSIX_ENSURE(low_nibble != 255, S2N_ERR_INVALID_HEX);
+
+ POSIX_ENSURE(i < blob->size, S2N_ERR_INVALID_HEX);
+ blob->data[i] = high_nibble << 4 | low_nibble;
+
+ i++;
+ j+=2;
}
+ blob->size = i;
- POSTCONDITION_POSIX(s2n_blob_validate(blob));
+ POSIX_POSTCONDITION(s2n_blob_validate(blob));
return S2N_SUCCESS;
}
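The rewritten s2n_hex_string_to_bytes now skips spaces and shrinks blob->size to the number of bytes actually written. A small sketch of the resulting behaviour, with an arbitrary buffer size:

/* Sketch: parse a spaced hex string; out.size ends up as 4, not the buffer capacity. */
uint8_t buf[8] = { 0 };
struct s2n_blob out = { 0 };
POSIX_GUARD(s2n_blob_init(&out, buf, sizeof(buf)));
POSIX_GUARD(s2n_hex_string_to_bytes((const uint8_t *) "de ad be ef", &out));
/* out.size == 4 and out.data holds 0xde 0xad 0xbe 0xef. */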
diff --git a/contrib/restricted/aws/s2n/utils/s2n_blob.h b/contrib/restricted/aws/s2n/utils/s2n_blob.h
index 8391b3567f..028e0156a2 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_blob.h
+++ b/contrib/restricted/aws/s2n/utils/s2n_blob.h
@@ -47,21 +47,28 @@ extern int s2n_blob_char_to_lower(struct s2n_blob *b);
extern int s2n_hex_string_to_bytes(const uint8_t *str, struct s2n_blob *blob);
extern int s2n_blob_slice(const struct s2n_blob *b, struct s2n_blob *slice, uint32_t offset, uint32_t size);
-#define s2n_stack_blob(name, requested_size, maximum) \
- size_t name ## _requested_size = (requested_size); \
- uint8_t name ## _buf[(maximum)] = {0}; \
- lte_check(name ## _requested_size, (maximum)); \
- struct s2n_blob name = {0}; \
- GUARD(s2n_blob_init(&name, name ## _buf, name ## _requested_size))
+#define s2n_stack_blob(name, requested_size, maximum) \
+ size_t name ## _requested_size = (requested_size); \
+ uint8_t name ## _buf[(maximum)] = {0}; \
+ POSIX_ENSURE_LTE(name ## _requested_size, (maximum)); \
+ struct s2n_blob name = {0}; \
+ POSIX_GUARD(s2n_blob_init(&name, name ## _buf, name ## _requested_size))
-#define S2N_BLOB_LABEL(name, str) \
- static uint8_t name##_data[] = str; \
+#define RESULT_STACK_BLOB(name, requested_size, maximum) \
+ size_t name ## _requested_size = (requested_size); \
+ uint8_t name ## _buf[(maximum)] = {0}; \
+ RESULT_ENSURE_LTE(name ## _requested_size, (maximum)); \
+ struct s2n_blob name = {0}; \
+ RESULT_GUARD_POSIX(s2n_blob_init(&name, name ## _buf, name ## _requested_size))
+
+#define S2N_BLOB_LABEL(name, str) \
+ static uint8_t name##_data[] = str; \
const struct s2n_blob name = { .data = name##_data, .size = sizeof(name##_data) - 1 };
/* The S2N_BLOB_FROM_HEX macro creates an s2n_blob with the contents of a hex string.
 * It is allocated on the stack, so there is no need to free it after use.
* hex should be a const char[]. This function checks against using char*,
* because sizeof needs to refer to the buffer length rather than a pointer size */
-#define S2N_BLOB_FROM_HEX( name, hex ) \
+#define S2N_BLOB_FROM_HEX( name, hex ) \
s2n_stack_blob(name, (sizeof(hex) - 1) / 2, (sizeof(hex) - 1) / 2); \
- GUARD(s2n_hex_string_to_bytes((const uint8_t*)hex, &name));
+ POSIX_GUARD(s2n_hex_string_to_bytes((const uint8_t*)hex, &name));
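For reference, a sketch of how the stack-blob macros read in a POSIX-style function after this change; the sizes and the hex literal are arbitrary.

/* Sketch: a stack-allocated blob and a hex-literal blob; neither needs an explicit free. */
static int s2n_example_stack_blobs(void)
{
    /* 16 bytes requested out of a 32-byte stack buffer; checked via POSIX_ENSURE_LTE. */
    s2n_stack_blob(scratch, 16, 32);
    POSIX_CHECKED_MEMSET(scratch.data, 0xff, scratch.size);

    /* Blob built from a hex literal; sizeof() drives the length calculation. */
    S2N_BLOB_FROM_HEX(label, "c0ffee");
    POSIX_ENSURE_EQ(label.size, 3);

    return S2N_SUCCESS;
}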
diff --git a/contrib/restricted/aws/s2n/utils/s2n_ensure.c b/contrib/restricted/aws/s2n/utils/s2n_ensure.c
index 4cfe7e40c9..910844ae9e 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_ensure.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_ensure.c
@@ -23,5 +23,6 @@ void* s2n_ensure_memcpy_trace(void *restrict to, const void *restrict from, size
return NULL;
}
- return memcpy(to, from, size);
+ /* use memmove instead of memcpy since it'll handle overlapping regions and not result in UB */
+ return memmove(to, from, size);
}
diff --git a/contrib/restricted/aws/s2n/utils/s2n_ensure.h b/contrib/restricted/aws/s2n/utils/s2n_ensure.h
index 8949521e9a..872f208df1 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_ensure.h
+++ b/contrib/restricted/aws/s2n/utils/s2n_ensure.h
@@ -67,4 +67,78 @@
} \
} while(0)
+/**
+ * `restrict` is part of the C99 standard and will work with any C compiler. If you're trying to
+ * compile with a C++ compiler, `restrict` is invalid. However, some C++ compilers support the behavior
+ * of `restrict` via the `__restrict__` keyword. Therefore, if the compiler supports `__restrict__`,
+ * use it.
+ *
+ * This is helpful for the benchmarks in tests/benchmark which use Google's Benchmark library and
+ * are all written in C++.
+ *
+ * https://gcc.gnu.org/onlinedocs/gcc/Restricted-Pointers.html
+ *
+ */
+#if defined(S2N___RESTRICT__SUPPORTED)
+extern void* s2n_ensure_memcpy_trace(void *__restrict__ to, const void *__restrict__ from, size_t size, const char *debug_str);
+#else
extern void* s2n_ensure_memcpy_trace(void *restrict to, const void *restrict from, size_t size, const char *debug_str);
+#endif
+
+/**
+ * These macros should not be used in validate functions.
+ * All validate functions are also used in assumptions for CBMC proofs,
+ * which should not contain __CPROVER_*_ok primitives. The use of these primitives
+ * in assumptions may lead to spurious results.
+ * When the code is being verified using CBMC, these properties are formally verified;
+ * When the code is built in debug mode, they are checked as much as possible using assertions.
+ * When the code is built in production mode, non-fatal properties are not checked.
+ * Violations of these properties are undefined behaviour.
+ */
+#ifdef CBMC
+# define S2N_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || __CPROVER_r_ok((base), (len)))
+# define S2N_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || __CPROVER_w_ok((base), (len)))
+#else
+/* the C runtime does not give a way to check these properties,
+ * but we can at least check for nullness. */
+# define S2N_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (base) != NULL)
+# define S2N_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (base) != NULL)
+#endif /* CBMC */
+
+/**
+ * These macros can safely be used in validate functions.
+ */
+#define S2N_MEM_IS_READABLE(base, len) (((len) == 0) || (base) != NULL)
+#define S2N_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base) != NULL)
+#define S2N_OBJECT_PTR_IS_READABLE(ptr) ((ptr) != NULL)
+#define S2N_OBJECT_PTR_IS_WRITABLE(ptr) ((ptr) != NULL)
+
+#define S2N_IMPLIES(a, b) (!(a) || (b))
+/**
+ * If and only if (iff) is a biconditional logical connective between statements a and b.
+ * Equivalent to (S2N_IMPLIES(a, b) && S2N_IMPLIES(b, a)).
+ */
+#define S2N_IFF(a, b) (!!(a) == !!(b))
+
+/**
+ * These macros are used to specify code contracts in CBMC proofs.
+ * Define function contracts.
+ * When the code is being verified using CBMC, these contracts are formally verified;
+ * When the code is built in production mode, contracts are not checked.
+ * Violations of the function contracts are undefined behaviour.
+ */
+#ifdef CBMC
+# define CONTRACT_ASSIGNS(...) __CPROVER_assigns(__VA_ARGS__)
+# define CONTRACT_ASSIGNS_ERR(...) CONTRACT_ASSIGNS(__VA_ARGS__, s2n_debug_str, s2n_errno)
+# define CONTRACT_REQUIRES(...) __CPROVER_requires(__VA_ARGS__)
+# define CONTRACT_ENSURES(...) __CPROVER_ensures(__VA_ARGS__)
+# define CONTRACT_INVARIANT(...) __CPROVER_loop_invariant(__VA_ARGS__)
+# define CONTRACT_RETURN_VALUE (__CPROVER_return_value)
+#else
+# define CONTRACT_ASSIGNS(...)
+# define CONTRACT_ASSIGNS_ERR(...)
+# define CONTRACT_REQUIRES(...)
+# define CONTRACT_ENSURES(...)
+# define CONTRACT_INVARIANT(...)
+# define CONTRACT_RETURN_VALUE
+#endif
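To make the helper predicates concrete, a couple of illustrative checks written in the style of a validate function; the variables are placeholders, not code from this change.

/* Sketch: S2N_IMPLIES, S2N_IFF and the memory predicates in a validate-style check. */
uint8_t *data = NULL;
uint32_t size = 0;
bool has_data = false;
/* If there is no data pointer, the size must be zero. */
RESULT_DEBUG_ENSURE(S2N_IMPLIES(data == NULL, size == 0), S2N_ERR_SAFETY);
/* has_data is true exactly when a non-empty buffer is present. */
RESULT_DEBUG_ENSURE(S2N_IFF(has_data, data != NULL && size > 0), S2N_ERR_SAFETY);
/* A zero-length range is always considered readable. */
RESULT_DEBUG_ENSURE(S2N_MEM_IS_READABLE(data, size), S2N_ERR_SAFETY);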
diff --git a/contrib/restricted/aws/s2n/utils/s2n_fork_detection.c b/contrib/restricted/aws/s2n/utils/s2n_fork_detection.c
new file mode 100644
index 0000000000..b148f4bbb3
--- /dev/null
+++ b/contrib/restricted/aws/s2n/utils/s2n_fork_detection.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/* This captures Darwin specialities. This is the only APPLE flavor we care about.
+ * Here we also capture various required feature test macros.
+ */
+#if defined(__APPLE__)
+ typedef struct _opaque_pthread_once_t __darwin_pthread_once_t;
+ typedef __darwin_pthread_once_t pthread_once_t;
+ #define _DARWIN_C_SOURCE
+#elif !defined(_GNU_SOURCE)
+ /* Keep in sync with feature probe tests/features/madvise.c */
+ #define _GNU_SOURCE
+#endif
+
+#include <sys/mman.h>
+
+/* Not always defined for Darwin */
+#if !defined(MAP_ANONYMOUS)
+ #define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#include "error/s2n_errno.h"
+#include "utils/s2n_fork_detection.h"
+#include "utils/s2n_safety.h"
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+
+#if defined(S2N_MADVISE_SUPPORTED) && defined(MADV_WIPEONFORK)
+#if (MADV_WIPEONFORK != 18)
+#error "MADV_WIPEONFORK is not 18"
+#endif
+#else /* defined(S2N_MADVISE_SUPPORTED) && defined(MADV_WIPEONFORK) */
+#define MADV_WIPEONFORK 18
+#endif
+
+/* These variables are used to disable fork detection mechanisms during testing,
+ * either all at once or at the individual level.
+ */
+static bool ignore_wipeonfork_or_inherit_zero_method_for_testing = false;
+static bool ignore_pthread_atfork_method_for_testing = false;
+static bool ignore_fork_detection_for_testing = false;
+
+#define S2N_FORK_EVENT 0
+#define S2N_NO_FORK_EVENT 1
+
+struct FGN_STATE {
+ /* The current cached fork generation number for this process */
+ uint64_t current_fork_generation_number;
+
+ /* Semaphore controlling access to the shared sentinel and signaling whether
+ * fork detection is enabled or not. We could use zero_on_fork_addr, but
+ * avoid overloading by using an explicit variable.
+ */
+ bool is_fork_detection_enabled;
+
+ /* Sentinel that signals a fork event has occurred */
+ volatile char *zero_on_fork_addr;
+
+ pthread_once_t fork_detection_once;
+ pthread_rwlock_t fork_detection_rw_lock;
+};
+
+/* We only need a single statically initialised state. Note, the state is
+ * inherited by child processes.
+ */
+static struct FGN_STATE fgn_state = {
+ .current_fork_generation_number = 0,
+ .is_fork_detection_enabled = false,
+ .zero_on_fork_addr = NULL,
+ .fork_detection_once = PTHREAD_ONCE_INIT,
+ .fork_detection_rw_lock = PTHREAD_RWLOCK_INITIALIZER,
+};
+
+
+/* Can currently never fail. See s2n_initialise_fork_detection_methods() for
+ * motivation.
+ */
+static inline S2N_RESULT s2n_initialise_wipeonfork_best_effort(void *addr, long page_size)
+{
+#if defined(S2N_MADVISE_SUPPORTED)
+ /* Return value ignored on purpose */
+ madvise(addr, (size_t) page_size, MADV_WIPEONFORK);
+#endif
+
+ return S2N_RESULT_OK;
+}
+
+static inline S2N_RESULT s2n_initialise_inherit_zero(void *addr, long page_size)
+{
+#if defined(S2N_MINHERIT_SUPPORTED) && defined(MAP_INHERIT_ZERO)
+ RESULT_ENSURE(minherit(addr, (size_t) page_size, MAP_INHERIT_ZERO) == 0, S2N_ERR_FORK_DETECTION_INIT);
+#endif
+
+ return S2N_RESULT_OK;
+}
+
+static void s2n_pthread_atfork_on_fork(void)
+{
+ /* This zeroises the first byte of the memory page pointed to by
+ * *zero_on_fork_addr. This is the same byte used as fork event detection
+ * sentinel in s2n_get_fork_generation_number(). The same memory page, and in
+ * turn, the byte, is also the memory zeroised by the MADV_WIPEONFORK fork
+ * detection mechanism.
+ *
+ * Acquire locks to be on the safe side. We want to avoid the checks in
+ * s2n_get_fork_generation_number() getting executed before setting the sentinel
+ * flag. The write lock prevents any other thread from owning any other type
+ * of lock.
+ *
+ * s2n_pthread_atfork_on_fork() cannot return errors. Hence, there is no way to
+ * gracefully recover if [un]locking fails.
+ */
+ if (pthread_rwlock_wrlock(&fgn_state.fork_detection_rw_lock) != 0) {
+ printf("pthread_rwlock_wrlock() failed. Aborting.\n");
+ abort();
+ }
+
+ if (fgn_state.zero_on_fork_addr == NULL) {
+ printf("fgn_state.zero_on_fork_addr is NULL. Aborting.\n");
+ abort();
+ }
+ *fgn_state.zero_on_fork_addr = 0;
+
+ if (pthread_rwlock_unlock(&fgn_state.fork_detection_rw_lock) != 0) {
+ printf("pthread_rwlock_unlock() failed. Aborting.\n");
+ abort();
+ }
+}
+
+static S2N_RESULT s2n_inititalise_pthread_atfork(void)
+{
+ /* Register the fork handler s2n_pthread_atfork_on_fork that is executed in the
+ * child process after a fork.
+ */
+ RESULT_ENSURE(pthread_atfork(NULL, NULL, s2n_pthread_atfork_on_fork) == 0, S2N_ERR_FORK_DETECTION_INIT);
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_initialise_fork_detection_methods_try(void *addr, long page_size)
+{
+ RESULT_GUARD_PTR(addr);
+
+ /* Some systems don't define MADV_WIPEONFORK in sys/mman.h but the kernel
+ * still supports the mechanism (AL2 being a prime example). Likely because
+ * glibc on the system is old. We might be able to include kernel header
+ * files directly, that define MADV_WIPEONFORK, conditioning on specific
+ * OS's. But it is a mess. A more reliable method is to probe the system, at
+ * run-time, whether madvise supports the MADV_WIPEONFORK advice. However,
+ * the method to probe for this feature is equivalent to actually attempting
+ * to initialise the MADV_WIPEONFORK fork detection. Compare with
+ * s2n_probe_madv_wipeonfork_support() (used for testing).
+ *
+ * Instead, we apply best-effort to initialise the MADV_WIPEONFORK fork
+ * detection and otherwise always require pthread_atfork to be initialised.
+ * We also currently always apply prediction resistance. So, this should be
+ * a safe default.
+ */
+ if (ignore_wipeonfork_or_inherit_zero_method_for_testing == false) {
+ RESULT_GUARD(s2n_initialise_wipeonfork_best_effort(addr, page_size));
+ }
+
+ if (ignore_wipeonfork_or_inherit_zero_method_for_testing == false) {
+ RESULT_GUARD(s2n_initialise_inherit_zero(addr, page_size));
+ }
+
+ if (ignore_pthread_atfork_method_for_testing == false) {
+ RESULT_GUARD(s2n_inititalise_pthread_atfork());
+ }
+
+ fgn_state.zero_on_fork_addr = addr;
+ *fgn_state.zero_on_fork_addr = S2N_NO_FORK_EVENT;
+ fgn_state.is_fork_detection_enabled = true;
+
+ return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_setup_mapping(void **addr, long *page_size) {
+
+ *page_size = sysconf(_SC_PAGESIZE);
+ RESULT_ENSURE_GT(*page_size, 0);
+
+ *addr = mmap(NULL, (size_t) *page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ RESULT_ENSURE_NE(*addr, MAP_FAILED);
+
+ return S2N_RESULT_OK;
+}
+
+static void s2n_initialise_fork_detection_methods(void)
+{
+ void *addr = MAP_FAILED;
+ long page_size = 0;
+
+ /* Only used to disable fork detection mechanisms during testing. */
+ if (ignore_wipeonfork_or_inherit_zero_method_for_testing == true &&
+ ignore_pthread_atfork_method_for_testing == true) {
+
+ ignore_fork_detection_for_testing = true;
+ return;
+ }
+
+ if (s2n_result_is_error(s2n_setup_mapping(&addr, &page_size)) == true) {
+ return;
+ }
+
+ /* Now we know that we have some memory mapped. Try to initialise fork
+ * detection methods. Unmap the memory if we fail for some reason.
+ */
+ if (s2n_result_is_error(s2n_initialise_fork_detection_methods_try(addr, page_size)) == true) {
+ /* No reason to verify return value of munmap() since we can't use that
+ * information for anything anyway. */
+ munmap(addr, (size_t) page_size);
+ addr = NULL;
+ fgn_state.zero_on_fork_addr = NULL;
+ fgn_state.is_fork_detection_enabled = false;
+ }
+}
+
+/* s2n_get_fork_generation_number returns S2N_RESULT_OK on success and
+ * S2N_RESULT_ERROR otherwise.
+ *
+ * On success, returns the current fork generation number in
+ * return_fork_generation_number. Caller must synchronise access to
+ * return_fork_generation_number.
+ */
+S2N_RESULT s2n_get_fork_generation_number(uint64_t *return_fork_generation_number)
+{
+ RESULT_ENSURE(pthread_once(&fgn_state.fork_detection_once, s2n_initialise_fork_detection_methods) == 0, S2N_ERR_FORK_DETECTION_INIT);
+
+ if (ignore_fork_detection_for_testing == true) {
+ /* Fork detection is meant to be disabled. Hence, return success.
+ * This should only happen during testing.
+ */
+ RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+ return S2N_RESULT_OK;
+ }
+
+ RESULT_ENSURE(fgn_state.is_fork_detection_enabled == true, S2N_ERR_FORK_DETECTION_INIT);
+
+ /* In most cases, we would not need to increment the fork generation number.
+ * So, it is cheaper, in the expected case, to take an optimistic read lock
+ * and later acquire a write lock if needed.
+ * Note that we set the returned fgn before checking for a fork event. We
+ * need to do this because thread execution might change between releasing
+ * the read lock and taking the write lock. In that time span, another
+ * thread can reset the fork event detection sentinel and we return from
+ * s2n_get_fork_generation_number() without setting the returned fgn
+ * appropriately.
+ */
+ RESULT_ENSURE(pthread_rwlock_rdlock(&fgn_state.fork_detection_rw_lock) == 0, S2N_ERR_RETRIEVE_FORK_GENERATION_NUMBER);
+ *return_fork_generation_number = fgn_state.current_fork_generation_number;
+ if (*fgn_state.zero_on_fork_addr != S2N_FORK_EVENT) {
+ /* No fork event detected. */
+ RESULT_ENSURE(pthread_rwlock_unlock(&fgn_state.fork_detection_rw_lock) == 0, S2N_ERR_RETRIEVE_FORK_GENERATION_NUMBER);
+ return S2N_RESULT_OK;
+ }
+ RESULT_ENSURE(pthread_rwlock_unlock(&fgn_state.fork_detection_rw_lock) == 0, S2N_ERR_RETRIEVE_FORK_GENERATION_NUMBER);
+
+ /* We are mutating the process-global, cached fork generation number. Need
+ * to acquire the write lock for that. Set returned fgn before checking the
+ * if condition, for the same reasons as above.
+ */
+ RESULT_ENSURE(pthread_rwlock_wrlock(&fgn_state.fork_detection_rw_lock) == 0, S2N_ERR_RETRIEVE_FORK_GENERATION_NUMBER);
+ *return_fork_generation_number = fgn_state.current_fork_generation_number;
+ if (*fgn_state.zero_on_fork_addr == S2N_FORK_EVENT) {
+ /* Fork event has been detected; reset sentinel, increment cached fork
+ * generation number (which is now "current" in this child process), and
+ * write incremented fork generation number to the output parameter.
+ */
+ *fgn_state.zero_on_fork_addr = S2N_NO_FORK_EVENT;
+ fgn_state.current_fork_generation_number = fgn_state.current_fork_generation_number + 1;
+ *return_fork_generation_number = fgn_state.current_fork_generation_number;
+ }
+ RESULT_ENSURE(pthread_rwlock_unlock(&fgn_state.fork_detection_rw_lock) == 0, S2N_ERR_RETRIEVE_FORK_GENERATION_NUMBER);
+
+ return S2N_RESULT_OK;
+}
+
+static void s2n_cleanup_cb_munmap(void **probe_addr)
+{
+ munmap(*probe_addr, (size_t) sysconf(_SC_PAGESIZE));
+}
+
+/* Run-time probe checking whether the system supports the MADV_WIPEONFORK fork
+ * detection mechanism.
+ */
+static S2N_RESULT s2n_probe_madv_wipeonfork_support(void) {
+
+ bool result = false;
+
+ /* It is not an error to call munmap on a range that does not contain any
+ * mapped pages.
+ */
+ DEFER_CLEANUP(void *probe_addr = MAP_FAILED, s2n_cleanup_cb_munmap);
+ long page_size = 0;
+
+ RESULT_GUARD(s2n_setup_mapping(&probe_addr, &page_size));
+
+#if defined(S2N_MADVISE_SUPPORTED)
+ /* Some versions of qemu (up to at least 5.0.0-rc4, see
+ * linux-user/syscall.c) ignore invalid advice arguments. Hence, we first
+ * verify that madvise() rejects advice arguments it doesn't know about.
+ */
+ RESULT_ENSURE_NE(madvise(probe_addr, (size_t) page_size, -1), 0);
+ RESULT_ENSURE_EQ(madvise(probe_addr, (size_t) page_size, MADV_WIPEONFORK), 0);
+
+ result = true;
+#endif
+
+ RESULT_ENSURE_EQ(result, true);
+
+ return S2N_RESULT_OK;
+}
+
+bool s2n_is_madv_wipeonfork_supported(void)
+{
+ return s2n_result_is_ok(s2n_probe_madv_wipeonfork_support());
+}
+
+bool s2n_is_map_inherit_zero_supported(void)
+{
+#if defined(S2N_MINHERIT_SUPPORTED) && defined(MAP_INHERIT_ZERO)
+ return true;
+#else
+ return false;
+#endif
+}
+
+/* Use for testing only */
+S2N_RESULT s2n_ignore_wipeonfork_and_inherit_zero_for_testing(void) {
+ RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+
+ ignore_wipeonfork_or_inherit_zero_method_for_testing = true;
+
+ return S2N_RESULT_OK;
+}
+
+S2N_RESULT s2n_ignore_pthread_atfork_for_testing(void) {
+ RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+
+ ignore_pthread_atfork_method_for_testing = true;
+
+ return S2N_RESULT_OK;
+}
+
diff --git a/contrib/restricted/aws/s2n/utils/s2n_fork_detection.h b/contrib/restricted/aws/s2n/utils/s2n_fork_detection.h
new file mode 100644
index 0000000000..d27ae95201
--- /dev/null
+++ b/contrib/restricted/aws/s2n/utils/s2n_fork_detection.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include "utils/s2n_result.h"
+
+S2N_RESULT s2n_get_fork_generation_number(uint64_t *return_fork_generation_number);
+bool s2n_is_madv_wipeonfork_supported(void);
+bool s2n_is_map_inherit_zero_supported(void);
+
+/* Use for testing only */
+S2N_RESULT s2n_ignore_wipeonfork_and_inherit_zero_for_testing(void);
+S2N_RESULT s2n_ignore_pthread_atfork_for_testing(void);
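A hedged sketch of how a consumer (for example, per-thread DRBG state) might use the fork generation number; the cached variable and the reseed step are illustrative assumptions.

/* Sketch: rebuild per-thread state when the fork generation number changes. */
static __thread uint64_t cached_fgn = 0;

static S2N_RESULT s2n_example_check_fork(void)
{
    uint64_t current_fgn = 0;
    RESULT_GUARD(s2n_get_fork_generation_number(&current_fgn));

    if (current_fgn != cached_fgn) {
        /* A fork happened since this thread last checked: rebuild any state
         * that must not be shared with the parent process (e.g. reseed a DRBG). */
        cached_fgn = current_fgn;
    }
    return S2N_RESULT_OK;
}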
diff --git a/contrib/restricted/aws/s2n/utils/s2n_init.c b/contrib/restricted/aws/s2n/utils/s2n_init.c
index 0f79f959fb..fb5b3ae928 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_init.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_init.c
@@ -20,15 +20,19 @@
#include "tls/extensions/s2n_extension_type.h"
#include "tls/s2n_security_policies.h"
#include "tls/extensions/s2n_client_key_share.h"
+#include "tls/s2n_tls13_secrets.h"
#include "utils/s2n_mem.h"
#include "utils/s2n_random.h"
#include "utils/s2n_safety.h"
+#include "utils/s2n_safety_macros.h"
#include "openssl/opensslv.h"
#include "pq-crypto/s2n_pq.h"
+#include <pthread.h>
+
static void s2n_cleanup_atexit(void);
unsigned long s2n_get_openssl_version(void)
@@ -36,48 +40,71 @@ unsigned long s2n_get_openssl_version(void)
return OPENSSL_VERSION_NUMBER;
}
+static pthread_t main_thread = 0;
+static bool initialized = false;
+static bool atexit_cleanup = true;
+int s2n_disable_atexit(void) {
+ POSIX_ENSURE(!initialized, S2N_ERR_INITIALIZED);
+ atexit_cleanup = false;
+ return S2N_SUCCESS;
+}
+
int s2n_init(void)
{
- GUARD_POSIX(s2n_fips_init());
- GUARD_POSIX(s2n_mem_init());
- GUARD_AS_POSIX(s2n_rand_init());
- GUARD_POSIX(s2n_cipher_suites_init());
- GUARD_POSIX(s2n_security_policies_init());
- GUARD_POSIX(s2n_config_defaults_init());
- GUARD_POSIX(s2n_extension_type_init());
- GUARD_AS_POSIX(s2n_pq_init());
-
- S2N_ERROR_IF(atexit(s2n_cleanup_atexit) != 0, S2N_ERR_ATEXIT);
+ main_thread = pthread_self();
+ POSIX_GUARD(s2n_fips_init());
+ POSIX_GUARD(s2n_mem_init());
+ POSIX_GUARD_RESULT(s2n_rand_init());
+ POSIX_GUARD(s2n_cipher_suites_init());
+ POSIX_GUARD(s2n_security_policies_init());
+ POSIX_GUARD(s2n_config_defaults_init());
+ POSIX_GUARD(s2n_extension_type_init());
+ POSIX_GUARD_RESULT(s2n_pq_init());
+ POSIX_GUARD_RESULT(s2n_tls13_empty_transcripts_init());
+
+ if (atexit_cleanup) {
+ POSIX_ENSURE_OK(atexit(s2n_cleanup_atexit), S2N_ERR_ATEXIT);
+ }
if (getenv("S2N_PRINT_STACKTRACE")) {
- s2n_stack_traces_enabled_set(true);
+ s2n_stack_traces_enabled_set(true);
}
- return 0;
-}
+ initialized = true;
-int s2n_cleanup(void)
-{
- /* s2n_cleanup is supposed to be called from each thread before exiting,
- * so ensure that whatever clean ups we have here are thread safe */
- GUARD_AS_POSIX(s2n_rand_cleanup_thread());
- return 0;
+ return S2N_SUCCESS;
}
static bool s2n_cleanup_atexit_impl(void)
{
/* all of these should run, regardless of result, but the
* values need to be consumed to prevent warnings */
+
+ /* the configs need to be wiped before resetting the memory callbacks */
+ s2n_wipe_static_configs();
+
bool a = s2n_result_is_ok(s2n_rand_cleanup_thread());
bool b = s2n_result_is_ok(s2n_rand_cleanup());
bool c = s2n_mem_cleanup() == 0;
- s2n_wipe_static_configs();
return a && b && c;
}
+int s2n_cleanup(void)
+{
+ /* s2n_cleanup is supposed to be called from each thread before exiting,
+ * so ensure that whatever clean ups we have here are thread safe */
+ POSIX_GUARD_RESULT(s2n_rand_cleanup_thread());
+
+ /* If this is the main thread and atexit cleanup is disabled,
+ * perform final cleanup now */
+ if (pthread_equal(pthread_self(), main_thread) && !atexit_cleanup) {
+ POSIX_ENSURE(s2n_cleanup_atexit_impl(), S2N_ERR_ATEXIT);
+ }
+ return 0;
+}
+
static void s2n_cleanup_atexit(void)
{
s2n_cleanup_atexit_impl();
}
-
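A sketch of the intended lifecycle when an application opts out of atexit-based cleanup; treat the exact call sequence as an assumption based on this hunk rather than documented API behaviour.

/* Sketch: explicit library lifecycle without the atexit hook. */
int main(void)
{
    /* Must be called before s2n_init(). */
    if (s2n_disable_atexit() != S2N_SUCCESS) { return 1; }
    if (s2n_init() != S2N_SUCCESS) { return 1; }

    /* ... use the library ... */

    /* On the main thread with atexit disabled, s2n_cleanup() performs the
     * final cleanup that would otherwise have run at process exit. */
    if (s2n_cleanup() != S2N_SUCCESS) { return 1; }
    return 0;
}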
diff --git a/contrib/restricted/aws/s2n/utils/s2n_map.c b/contrib/restricted/aws/s2n/utils/s2n_map.c
index 8851b14e1b..1b3d9eaa9c 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_map.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_map.c
@@ -27,22 +27,23 @@
#include "utils/s2n_map.h"
#include "utils/s2n_map_internal.h"
-#include <s2n.h>
+#include "api/s2n.h"
#define S2N_INITIAL_TABLE_SIZE 1024
static S2N_RESULT s2n_map_slot(const struct s2n_map *map, struct s2n_blob *key, uint32_t *slot)
{
+ RESULT_ENSURE_REF(map);
union {
uint8_t u8[32];
uint32_t u32[8];
} digest;
DEFER_CLEANUP(struct s2n_hash_state sha256 = {0}, s2n_hash_free);
- GUARD_AS_RESULT(s2n_hash_new(&sha256));
- GUARD_AS_RESULT(s2n_hash_init(&sha256, S2N_HASH_SHA256));
- GUARD_AS_RESULT(s2n_hash_update(&sha256, key->data, key->size));
- GUARD_AS_RESULT(s2n_hash_digest(&sha256, digest.u8, sizeof(digest)));
+ RESULT_GUARD_POSIX(s2n_hash_new(&sha256));
+ RESULT_GUARD_POSIX(s2n_hash_init(&sha256, S2N_HASH_SHA256));
+ RESULT_GUARD_POSIX(s2n_hash_update(&sha256, key->data, key->size));
+ RESULT_GUARD_POSIX(s2n_hash_digest(&sha256, digest.u8, sizeof(digest)));
*slot = digest.u32[0] % map->capacity;
return S2N_RESULT_OK;
@@ -50,27 +51,28 @@ static S2N_RESULT s2n_map_slot(const struct s2n_map *map, struct s2n_blob *key,
static S2N_RESULT s2n_map_embiggen(struct s2n_map *map, uint32_t capacity)
{
+ RESULT_ENSURE_REF(map);
struct s2n_blob mem = {0};
struct s2n_map tmp = {0};
- ENSURE(!map->immutable, S2N_ERR_MAP_IMMUTABLE);
+ RESULT_ENSURE(!map->immutable, S2N_ERR_MAP_IMMUTABLE);
- GUARD_AS_RESULT(s2n_alloc(&mem, (capacity * sizeof(struct s2n_map_entry))));
- GUARD_AS_RESULT(s2n_blob_zero(&mem));
+ RESULT_GUARD_POSIX(s2n_alloc(&mem, (capacity * sizeof(struct s2n_map_entry))));
+ RESULT_GUARD_POSIX(s2n_blob_zero(&mem));
tmp.capacity = capacity;
tmp.size = 0;
tmp.table = (void *) mem.data;
tmp.immutable = 0;
- for (int i = 0; i < map->capacity; i++) {
+ for (uint32_t i = 0; i < map->capacity; i++) {
if (map->table[i].key.size) {
- GUARD_RESULT(s2n_map_add(&tmp, &map->table[i].key, &map->table[i].value));
- GUARD_AS_RESULT(s2n_free(&map->table[i].key));
- GUARD_AS_RESULT(s2n_free(&map->table[i].value));
+ RESULT_GUARD(s2n_map_add(&tmp, &map->table[i].key, &map->table[i].value));
+ RESULT_GUARD_POSIX(s2n_free(&map->table[i].key));
+ RESULT_GUARD_POSIX(s2n_free(&map->table[i].value));
}
}
- GUARD_AS_RESULT(s2n_free_object((uint8_t **)&map->table, map->capacity * sizeof(struct s2n_map_entry)));
+ RESULT_GUARD_POSIX(s2n_free_object((uint8_t **)&map->table, map->capacity * sizeof(struct s2n_map_entry)));
/* Clone the temporary map */
map->capacity = tmp.capacity;
@@ -88,11 +90,11 @@ struct s2n_map *s2n_map_new()
struct s2n_map *s2n_map_new_with_initial_capacity(uint32_t capacity)
{
- S2N_ERROR_IF_PTR(capacity == 0, S2N_ERR_MAP_INVALID_MAP_SIZE);
+ PTR_ENSURE(capacity != 0, S2N_ERR_MAP_INVALID_MAP_SIZE);
struct s2n_blob mem = {0};
struct s2n_map *map;
- GUARD_POSIX_PTR(s2n_alloc(&mem, sizeof(struct s2n_map)));
+ PTR_GUARD_POSIX(s2n_alloc(&mem, sizeof(struct s2n_map)));
map = (void *) mem.data;
map->capacity = 0;
@@ -100,22 +102,23 @@ struct s2n_map *s2n_map_new_with_initial_capacity(uint32_t capacity)
map->immutable = 0;
map->table = NULL;
- GUARD_RESULT_PTR(s2n_map_embiggen(map, capacity));
+ PTR_GUARD_RESULT(s2n_map_embiggen(map, capacity));
return map;
}
S2N_RESULT s2n_map_add(struct s2n_map *map, struct s2n_blob *key, struct s2n_blob *value)
{
- ENSURE(!map->immutable, S2N_ERR_MAP_IMMUTABLE);
+ RESULT_ENSURE_REF(map);
+ RESULT_ENSURE(!map->immutable, S2N_ERR_MAP_IMMUTABLE);
if (map->capacity < (map->size * 2)) {
/* Embiggen the map */
- GUARD_RESULT(s2n_map_embiggen(map, map->capacity * 2));
+ RESULT_GUARD(s2n_map_embiggen(map, map->capacity * 2));
}
uint32_t slot = 0;
- GUARD_RESULT(s2n_map_slot(map, key, &slot));
+ RESULT_GUARD(s2n_map_slot(map, key, &slot));
/* Linear probing until we find an empty slot */
while(map->table[slot].key.size) {
@@ -127,11 +130,11 @@ S2N_RESULT s2n_map_add(struct s2n_map *map, struct s2n_blob *key, struct s2n_blo
}
/* We found a duplicate key */
- BAIL(S2N_ERR_MAP_DUPLICATE);
+ RESULT_BAIL(S2N_ERR_MAP_DUPLICATE);
}
- GUARD_AS_RESULT(s2n_dup(key, &map->table[slot].key));
- GUARD_AS_RESULT(s2n_dup(value, &map->table[slot].value));
+ RESULT_GUARD_POSIX(s2n_dup(key, &map->table[slot].key));
+ RESULT_GUARD_POSIX(s2n_dup(value, &map->table[slot].value));
map->size++;
return S2N_RESULT_OK;
@@ -139,15 +142,16 @@ S2N_RESULT s2n_map_add(struct s2n_map *map, struct s2n_blob *key, struct s2n_blo
S2N_RESULT s2n_map_put(struct s2n_map *map, struct s2n_blob *key, struct s2n_blob *value)
{
- ENSURE(!map->immutable, S2N_ERR_MAP_IMMUTABLE);
+ RESULT_ENSURE_REF(map);
+ RESULT_ENSURE(!map->immutable, S2N_ERR_MAP_IMMUTABLE);
if (map->capacity < (map->size * 2)) {
/* Embiggen the map */
- GUARD_RESULT(s2n_map_embiggen(map, map->capacity * 2));
+ RESULT_GUARD(s2n_map_embiggen(map, map->capacity * 2));
}
uint32_t slot = 0;
- GUARD_RESULT(s2n_map_slot(map, key, &slot));
+ RESULT_GUARD(s2n_map_slot(map, key, &slot));
/* Linear probing until we find an empty slot */
while(map->table[slot].key.size) {
@@ -159,14 +163,14 @@ S2N_RESULT s2n_map_put(struct s2n_map *map, struct s2n_blob *key, struct s2n_blo
}
/* We found a duplicate key that will be overwritten */
- GUARD_AS_RESULT(s2n_free(&map->table[slot].key));
- GUARD_AS_RESULT(s2n_free(&map->table[slot].value));
+ RESULT_GUARD_POSIX(s2n_free(&map->table[slot].key));
+ RESULT_GUARD_POSIX(s2n_free(&map->table[slot].value));
map->size--;
break;
}
- GUARD_AS_RESULT(s2n_dup(key, &map->table[slot].key));
- GUARD_AS_RESULT(s2n_dup(value, &map->table[slot].value));
+ RESULT_GUARD_POSIX(s2n_dup(key, &map->table[slot].key));
+ RESULT_GUARD_POSIX(s2n_dup(value, &map->table[slot].value));
map->size++;
return S2N_RESULT_OK;
@@ -174,6 +178,7 @@ S2N_RESULT s2n_map_put(struct s2n_map *map, struct s2n_blob *key, struct s2n_blo
S2N_RESULT s2n_map_complete(struct s2n_map *map)
{
+ RESULT_ENSURE_REF(map);
map->immutable = 1;
return S2N_RESULT_OK;
@@ -181,6 +186,7 @@ S2N_RESULT s2n_map_complete(struct s2n_map *map)
S2N_RESULT s2n_map_unlock(struct s2n_map *map)
{
+ RESULT_ENSURE_REF(map);
map->immutable = 0;
return S2N_RESULT_OK;
@@ -188,10 +194,11 @@ S2N_RESULT s2n_map_unlock(struct s2n_map *map)
S2N_RESULT s2n_map_lookup(const struct s2n_map *map, struct s2n_blob *key, struct s2n_blob *value, bool *key_found)
{
- ENSURE(map->immutable, S2N_ERR_MAP_MUTABLE);
+ RESULT_ENSURE_REF(map);
+ RESULT_ENSURE(map->immutable, S2N_ERR_MAP_MUTABLE);
uint32_t slot = 0;
- GUARD_RESULT(s2n_map_slot(map, key, &slot));
+ RESULT_GUARD(s2n_map_slot(map, key, &slot));
const uint32_t initial_slot = slot;
while(map->table[slot].key.size) {
@@ -222,19 +229,25 @@ S2N_RESULT s2n_map_lookup(const struct s2n_map *map, struct s2n_blob *key, struc
S2N_RESULT s2n_map_free(struct s2n_map *map)
{
+ if (map == NULL) {
+ return S2N_RESULT_OK;
+ }
+
/* Free the keys and values */
- for (int i = 0; i < map->capacity; i++) {
+ /* cppcheck has a false positive warning for checking the pointer here */
+ /* cppcheck-suppress nullPointerRedundantCheck */
+ for (uint32_t i = 0; i < map->capacity; i++) {
if (map->table[i].key.size) {
- GUARD_AS_RESULT(s2n_free(&map->table[i].key));
- GUARD_AS_RESULT(s2n_free(&map->table[i].value));
+ RESULT_GUARD_POSIX(s2n_free(&map->table[i].key));
+ RESULT_GUARD_POSIX(s2n_free(&map->table[i].value));
}
}
/* Free the table */
- GUARD_AS_RESULT(s2n_free_object((uint8_t **)&map->table, map->capacity * sizeof(struct s2n_map_entry)));
+ RESULT_GUARD_POSIX(s2n_free_object((uint8_t **)&map->table, map->capacity * sizeof(struct s2n_map_entry)));
/* And finally the map */
- GUARD_AS_RESULT(s2n_free_object((uint8_t **)&map, sizeof(struct s2n_map)));
+ RESULT_GUARD_POSIX(s2n_free_object((uint8_t **)&map, sizeof(struct s2n_map)));
return S2N_RESULT_OK;
}
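For orientation, a small sketch of the map's add/complete/lookup flow, including the new NULL-tolerant free; keys and values are arbitrary.

/* Sketch: insert a key/value pair, freeze the map, then look the key up. */
static S2N_RESULT s2n_example_map_usage(void)
{
    struct s2n_map *map = s2n_map_new();
    RESULT_ENSURE_REF(map);

    uint8_t key_bytes[] = "host";
    uint8_t val_bytes[] = "example.com";
    struct s2n_blob key = { 0 }, value = { 0 }, out = { 0 };
    RESULT_GUARD_POSIX(s2n_blob_init(&key, key_bytes, sizeof(key_bytes)));
    RESULT_GUARD_POSIX(s2n_blob_init(&value, val_bytes, sizeof(val_bytes)));

    RESULT_GUARD(s2n_map_add(map, &key, &value));
    RESULT_GUARD(s2n_map_complete(map)); /* lookups require an immutable map */

    bool key_found = false;
    RESULT_GUARD(s2n_map_lookup(map, &key, &out, &key_found));
    RESULT_ENSURE_EQ(key_found, true);

    RESULT_GUARD(s2n_map_free(map)); /* now a no-op when map is NULL */
    return S2N_RESULT_OK;
}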
diff --git a/contrib/restricted/aws/s2n/utils/s2n_mem.c b/contrib/restricted/aws/s2n/utils/s2n_mem.c
index dc14847851..fcb848fec4 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_mem.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_mem.c
@@ -14,7 +14,7 @@
*/
#define _DEFAULT_SOURCE 1
-#if !defined(__APPLE__) && !defined(__FreeBSD__)
+#if defined(S2N_FEATURES_AVAILABLE)
#include <features.h>
#endif
@@ -49,14 +49,14 @@ static int s2n_mem_init_impl(void)
long sysconf_rc = sysconf(_SC_PAGESIZE);
/* sysconf must not error, and page_size cannot be 0 */
- ENSURE_POSIX(sysconf_rc > 0, S2N_FAILURE);
+ POSIX_ENSURE(sysconf_rc > 0, S2N_FAILURE);
/* page_size must be a valid uint32 */
- ENSURE_POSIX(sysconf_rc <= UINT32_MAX, S2N_FAILURE);
+ POSIX_ENSURE(sysconf_rc <= UINT32_MAX, S2N_FAILURE);
page_size = (uint32_t) sysconf_rc;
- if (getenv("S2N_DONT_MLOCK")) {
+ if (getenv("S2N_DONT_MLOCK") || s2n_in_unit_test()) {
s2n_mem_malloc_cb = s2n_mem_malloc_no_mlock_impl;
s2n_mem_free_cb = s2n_mem_free_no_mlock_impl;
}
@@ -73,10 +73,9 @@ static int s2n_mem_cleanup_impl(void)
static int s2n_mem_free_mlock_impl(void *ptr, uint32_t size)
{
- int munlock_rc = munlock(ptr, size);
+ /* Perform a best-effort `munlock`: ignore any errors during unlocking. */
+ munlock(ptr, size);
free(ptr);
- GUARD(munlock_rc);
-
return S2N_SUCCESS;
}
@@ -89,15 +88,15 @@ static int s2n_mem_free_no_mlock_impl(void *ptr, uint32_t size)
static int s2n_mem_malloc_mlock_impl(void **ptr, uint32_t requested, uint32_t *allocated)
{
- notnull_check(ptr);
+ POSIX_ENSURE_REF(ptr);
/* Page aligned allocation required for mlock */
uint32_t allocate;
- GUARD(s2n_align_to(requested, page_size, &allocate));
+ POSIX_GUARD(s2n_align_to(requested, page_size, &allocate));
*ptr = NULL;
- S2N_ERROR_IF(posix_memalign(ptr, page_size, allocate) != 0, S2N_ERR_ALLOC);
+ POSIX_ENSURE(posix_memalign(ptr, page_size, allocate) == 0, S2N_ERR_ALLOC);
*allocated = allocate;
/*
@@ -106,18 +105,18 @@ static int s2n_mem_malloc_mlock_impl(void **ptr, uint32_t requested, uint32_t *a
*/
#if defined(MADV_DONTDUMP) && !defined(S2N_ADDRESS_SANITIZER) && !defined(S2N_FUZZ_TESTING)
if (madvise(*ptr, *allocated, MADV_DONTDUMP) != 0) {
- GUARD(s2n_mem_free_no_mlock_impl(*ptr, *allocated));
- S2N_ERROR(S2N_ERR_MADVISE);
+ POSIX_GUARD(s2n_mem_free_no_mlock_impl(*ptr, *allocated));
+ POSIX_BAIL(S2N_ERR_MADVISE);
}
#endif
if (mlock(*ptr, *allocated) != 0) {
/* When mlock fails, no memory will be locked, so we don't use munlock on free */
- GUARD(s2n_mem_free_no_mlock_impl(*ptr, *allocated));
- S2N_ERROR(S2N_ERR_MLOCK);
+ POSIX_GUARD(s2n_mem_free_no_mlock_impl(*ptr, *allocated));
+ POSIX_BAIL(S2N_ERR_MLOCK);
}
- S2N_ERROR_IF(*ptr == NULL, S2N_ERR_ALLOC);
+ POSIX_ENSURE(*ptr != NULL, S2N_ERR_ALLOC);
return S2N_SUCCESS;
}
@@ -125,7 +124,7 @@ static int s2n_mem_malloc_mlock_impl(void **ptr, uint32_t requested, uint32_t *a
static int s2n_mem_malloc_no_mlock_impl(void **ptr, uint32_t requested, uint32_t *allocated)
{
*ptr = malloc(requested);
- S2N_ERROR_IF(*ptr == NULL, S2N_ERR_ALLOC);
+ POSIX_ENSURE(*ptr != NULL, S2N_ERR_ALLOC);
*allocated = requested;
return S2N_SUCCESS;
@@ -134,12 +133,12 @@ static int s2n_mem_malloc_no_mlock_impl(void **ptr, uint32_t requested, uint32_t
int s2n_mem_set_callbacks(s2n_mem_init_callback mem_init_callback, s2n_mem_cleanup_callback mem_cleanup_callback,
s2n_mem_malloc_callback mem_malloc_callback, s2n_mem_free_callback mem_free_callback)
{
- S2N_ERROR_IF(initialized == true, S2N_ERR_INITIALIZED);
+ POSIX_ENSURE(!initialized, S2N_ERR_INITIALIZED);
- notnull_check(mem_init_callback);
- notnull_check(mem_cleanup_callback);
- notnull_check(mem_malloc_callback);
- notnull_check(mem_free_callback);
+ POSIX_ENSURE_REF(mem_init_callback);
+ POSIX_ENSURE_REF(mem_cleanup_callback);
+ POSIX_ENSURE_REF(mem_malloc_callback);
+ POSIX_ENSURE_REF(mem_free_callback);
s2n_mem_init_cb = mem_init_callback;
s2n_mem_cleanup_cb = mem_cleanup_callback;
@@ -151,11 +150,11 @@ int s2n_mem_set_callbacks(s2n_mem_init_callback mem_init_callback, s2n_mem_clean
int s2n_alloc(struct s2n_blob *b, uint32_t size)
{
- S2N_ERROR_IF(initialized == false, S2N_ERR_NOT_INITIALIZED);
- notnull_check(b);
+ POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
+ POSIX_ENSURE_REF(b);
const struct s2n_blob temp = {0};
*b = temp;
- GUARD(s2n_realloc(b, size));
+ POSIX_GUARD(s2n_realloc(b, size));
return S2N_SUCCESS;
}
@@ -171,9 +170,9 @@ bool s2n_blob_is_growable(const struct s2n_blob* b)
*/
int s2n_realloc(struct s2n_blob *b, uint32_t size)
{
- S2N_ERROR_IF(initialized == false, S2N_ERR_NOT_INITIALIZED);
- notnull_check(b);
- S2N_ERROR_IF(!s2n_blob_is_growable(b), S2N_ERR_RESIZE_STATIC_BLOB);
+ POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
+ POSIX_ENSURE_REF(b);
+ POSIX_ENSURE(s2n_blob_is_growable(b), S2N_ERR_RESIZE_STATIC_BLOB);
if (size == 0) {
return s2n_free(b);
}
@@ -184,8 +183,8 @@ int s2n_realloc(struct s2n_blob *b, uint32_t size)
if (size < b->size) {
/* Zero the existing blob memory before the we release it */
struct s2n_blob slice = {0};
- GUARD(s2n_blob_slice(b, &slice, size, b->size - size));
- GUARD(s2n_blob_zero(&slice));
+ POSIX_GUARD(s2n_blob_slice(b, &slice, size, b->size - size));
+ POSIX_GUARD(s2n_blob_zero(&slice));
}
b->size = size;
@@ -193,19 +192,19 @@ int s2n_realloc(struct s2n_blob *b, uint32_t size)
}
struct s2n_blob new_memory = {.data = NULL, .size = size, .allocated = 0, .growable = 1};
- if(s2n_mem_malloc_cb((void **) &new_memory.data, new_memory.size, &new_memory.allocated) != 0) {
+ if (s2n_mem_malloc_cb((void **) &new_memory.data, new_memory.size, &new_memory.allocated) != 0) {
S2N_ERROR_PRESERVE_ERRNO();
}
- S2N_ERROR_IF(new_memory.allocated < new_memory.size, S2N_ERR_ALLOC);
- S2N_ERROR_IF(new_memory.data == NULL, S2N_ERR_ALLOC);
+ POSIX_ENSURE(new_memory.allocated >= new_memory.size, S2N_ERR_ALLOC);
+ POSIX_ENSURE(new_memory.data != NULL, S2N_ERR_ALLOC);
if (b->size) {
- memcpy_check(new_memory.data, b->data, b->size);
+ POSIX_CHECKED_MEMCPY(new_memory.data, b->data, b->size);
}
if (b->allocated) {
- GUARD(s2n_free(b));
+ POSIX_GUARD(s2n_free(b));
}
*b = new_memory;
@@ -214,11 +213,13 @@ int s2n_realloc(struct s2n_blob *b, uint32_t size)
int s2n_free_object(uint8_t **p_data, uint32_t size)
{
- notnull_check(p_data);
+ POSIX_ENSURE_REF(p_data);
if (*p_data == NULL) {
return S2N_SUCCESS;
}
+
+ POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
struct s2n_blob b = {.data = *p_data, .allocated = size, .size = size, .growable = 1};
/* s2n_free() will call free() even if it returns error (for a growable blob).
@@ -230,22 +231,22 @@ int s2n_free_object(uint8_t **p_data, uint32_t size)
int s2n_dup(struct s2n_blob *from, struct s2n_blob *to)
{
- S2N_ERROR_IF(initialized == false, S2N_ERR_NOT_INITIALIZED);
- eq_check(to->size, 0);
- eq_check(to->data, NULL);
- ne_check(from->size, 0);
- ne_check(from->data, NULL);
+ POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
+ POSIX_ENSURE_EQ(to->size, 0);
+ POSIX_ENSURE_EQ(to->data, NULL);
+ POSIX_ENSURE_NE(from->size, 0);
+ POSIX_ENSURE_NE(from->data, NULL);
- GUARD(s2n_alloc(to, from->size));
+ POSIX_GUARD(s2n_alloc(to, from->size));
- memcpy_check(to->data, from->data, to->size);
+ POSIX_CHECKED_MEMCPY(to->data, from->data, to->size);
return S2N_SUCCESS;
}
int s2n_mem_init(void)
{
- GUARD(s2n_mem_init_cb());
+ POSIX_GUARD(s2n_mem_init_cb());
initialized = true;
@@ -264,8 +265,8 @@ uint32_t s2n_mem_get_page_size(void)
int s2n_mem_cleanup(void)
{
- S2N_ERROR_IF(initialized == false, S2N_ERR_NOT_INITIALIZED);
- GUARD(s2n_mem_cleanup_cb());
+ POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
+ POSIX_GUARD(s2n_mem_cleanup_cb());
initialized = false;
@@ -274,31 +275,31 @@ int s2n_mem_cleanup(void)
int s2n_free(struct s2n_blob *b)
{
- PRECONDITION_POSIX(s2n_blob_validate(b));
+ POSIX_PRECONDITION(s2n_blob_validate(b));
/* To avoid memory leaks, don't exit the function until the memory
has been freed */
int zero_rc = s2n_blob_zero(b);
- S2N_ERROR_IF(initialized == false, S2N_ERR_NOT_INITIALIZED);
- S2N_ERROR_IF(!s2n_blob_is_growable(b), S2N_ERR_FREE_STATIC_BLOB);
+ POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
+ POSIX_ENSURE(s2n_blob_is_growable(b), S2N_ERR_FREE_STATIC_BLOB);
- GUARD(s2n_mem_free_cb(b->data, b->allocated));
+ POSIX_GUARD(s2n_mem_free_cb(b->data, b->allocated));
*b = (struct s2n_blob) {0};
- GUARD(zero_rc);
+ POSIX_GUARD(zero_rc);
return S2N_SUCCESS;
}
int s2n_blob_zeroize_free(struct s2n_blob *b) {
- S2N_ERROR_IF(initialized == false, S2N_ERR_NOT_INITIALIZED);
- notnull_check(b);
+ POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
+ POSIX_ENSURE_REF(b);
- GUARD(s2n_blob_zero(b));
+ POSIX_GUARD(s2n_blob_zero(b));
if (b->allocated) {
- GUARD(s2n_free(b));
+ POSIX_GUARD(s2n_free(b));
}
return S2N_SUCCESS;
}
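For illustration, a minimal sketch of a caller-side helper written against the renamed macros above; everything named s2n_example_* is hypothetical, and only s2n_alloc(), POSIX_GUARD, POSIX_ENSURE_REF and POSIX_CHECKED_MEMCPY as shown in this diff are assumed.

    #include <stdint.h>

    #include "utils/s2n_blob.h"
    #include "utils/s2n_mem.h"
    #include "utils/s2n_safety.h"

    /* Hypothetical helper: copy len bytes of src into a freshly allocated blob.
     * Requires s2n_init() to have run, since s2n_alloc() checks `initialized`. */
    static int s2n_example_copy_to_blob(struct s2n_blob *out, const uint8_t *src, uint32_t len)
    {
        POSIX_ENSURE_REF(out);
        POSIX_ENSURE_REF(src);

        /* POSIX_GUARD returns S2N_FAILURE if the allocation fails. */
        POSIX_GUARD(s2n_alloc(out, len));

        /* POSIX_CHECKED_MEMCPY null-checks both pointers before copying. */
        POSIX_CHECKED_MEMCPY(out->data, src, len);
        return S2N_SUCCESS;
    }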
diff --git a/contrib/restricted/aws/s2n/utils/s2n_random.c b/contrib/restricted/aws/s2n/utils/s2n_random.c
index be063883d0..295c87006f 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_random.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_random.c
@@ -28,7 +28,7 @@
#include <errno.h>
#include <time.h>
-#include "s2n.h"
+#include "api/s2n.h"
#if defined(S2N_CPUID_AVAILABLE)
#include <cpuid.h>
@@ -63,7 +63,7 @@ static int entropy_fd = UNINITIALIZED_ENTROPY_FD;
static __thread struct s2n_drbg per_thread_private_drbg = {0};
static __thread struct s2n_drbg per_thread_public_drbg = {0};
-static void *zeroed_when_forked_page;
+static void *zeroed_when_forked_page = NULL;
static int zero = 0;
static __thread void *zero_if_forked_ptr = &zero;
@@ -98,6 +98,10 @@ int s2n_rand_set_callbacks(s2n_rand_init_callback rand_init_callback,
s2n_rand_seed_callback rand_seed_callback,
s2n_rand_mix_callback rand_mix_callback)
{
+ POSIX_ENSURE_REF(rand_init_callback);
+ POSIX_ENSURE_REF(rand_cleanup_callback);
+ POSIX_ENSURE_REF(rand_seed_callback);
+ POSIX_ENSURE_REF(rand_mix_callback);
s2n_rand_init_cb = rand_init_callback;
s2n_rand_cleanup_cb = rand_cleanup_callback;
s2n_rand_seed_cb = rand_seed_callback;
@@ -108,18 +112,18 @@ int s2n_rand_set_callbacks(s2n_rand_init_callback rand_init_callback,
S2N_RESULT s2n_get_seed_entropy(struct s2n_blob *blob)
{
- ENSURE_REF(blob);
+ RESULT_ENSURE_REF(blob);
- GUARD_AS_RESULT(s2n_rand_seed_cb(blob->data, blob->size));
+ RESULT_GUARD_POSIX(s2n_rand_seed_cb(blob->data, blob->size));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_get_mix_entropy(struct s2n_blob *blob)
{
- ENSURE_REF(blob);
+ RESULT_ENSURE_REF(blob);
- GUARD_AS_RESULT(s2n_rand_mix_cb(blob->data, blob->size));
+ RESULT_GUARD_POSIX(s2n_rand_mix_cb(blob->data, blob->size));
return S2N_RESULT_OK;
}
@@ -138,10 +142,10 @@ static inline S2N_RESULT s2n_defend_if_forked(void)
if (zero_if_forked == 0) {
/* Clean up the old drbg first */
- GUARD_RESULT(s2n_rand_cleanup_thread());
+ RESULT_GUARD(s2n_rand_cleanup_thread());
/* Instantiate the new ones */
- GUARD_AS_RESULT(s2n_drbg_instantiate(&per_thread_public_drbg, &public, S2N_AES_128_CTR_NO_DF_PR));
- GUARD_AS_RESULT(s2n_drbg_instantiate(&per_thread_private_drbg, &private, S2N_AES_128_CTR_NO_DF_PR));
+ RESULT_GUARD(s2n_drbg_instantiate(&per_thread_public_drbg, &public, S2N_AES_128_CTR_NO_DF_PR));
+ RESULT_GUARD(s2n_drbg_instantiate(&per_thread_private_drbg, &private, S2N_AES_128_CTR_NO_DF_PR));
zero_if_forked_ptr = zeroed_when_forked_page;
zero_if_forked = 1;
}
@@ -151,7 +155,7 @@ static inline S2N_RESULT s2n_defend_if_forked(void)
S2N_RESULT s2n_get_public_random_data(struct s2n_blob *blob)
{
- GUARD_RESULT(s2n_defend_if_forked());
+ RESULT_GUARD(s2n_defend_if_forked());
uint32_t offset = 0;
uint32_t remaining = blob->size;
@@ -159,9 +163,9 @@ S2N_RESULT s2n_get_public_random_data(struct s2n_blob *blob)
while(remaining) {
struct s2n_blob slice = { 0 };
- GUARD_AS_RESULT(s2n_blob_slice(blob, &slice, offset, MIN(remaining, S2N_DRBG_GENERATE_LIMIT)));;
+ RESULT_GUARD_POSIX(s2n_blob_slice(blob, &slice, offset, MIN(remaining, S2N_DRBG_GENERATE_LIMIT)));;
- GUARD_AS_RESULT(s2n_drbg_generate(&per_thread_public_drbg, &slice));
+ RESULT_GUARD(s2n_drbg_generate(&per_thread_public_drbg, &slice));
remaining -= slice.size;
offset += slice.size;
@@ -172,7 +176,7 @@ S2N_RESULT s2n_get_public_random_data(struct s2n_blob *blob)
S2N_RESULT s2n_get_private_random_data(struct s2n_blob *blob)
{
- GUARD_RESULT(s2n_defend_if_forked());
+ RESULT_GUARD(s2n_defend_if_forked());
uint32_t offset = 0;
uint32_t remaining = blob->size;
@@ -180,9 +184,9 @@ S2N_RESULT s2n_get_private_random_data(struct s2n_blob *blob)
while(remaining) {
struct s2n_blob slice = { 0 };
- GUARD_AS_RESULT(s2n_blob_slice(blob, &slice, offset, MIN(remaining, S2N_DRBG_GENERATE_LIMIT)));;
+ RESULT_GUARD_POSIX(s2n_blob_slice(blob, &slice, offset, MIN(remaining, S2N_DRBG_GENERATE_LIMIT)));;
- GUARD_AS_RESULT(s2n_drbg_generate(&per_thread_private_drbg, &slice));
+ RESULT_GUARD(s2n_drbg_generate(&per_thread_private_drbg, &slice));
remaining -= slice.size;
offset += slice.size;
@@ -193,19 +197,19 @@ S2N_RESULT s2n_get_private_random_data(struct s2n_blob *blob)
S2N_RESULT s2n_get_public_random_bytes_used(uint64_t *bytes_used)
{
- GUARD_AS_RESULT(s2n_drbg_bytes_used(&per_thread_public_drbg, bytes_used));
+ RESULT_GUARD(s2n_drbg_bytes_used(&per_thread_public_drbg, bytes_used));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_get_private_random_bytes_used(uint64_t *bytes_used)
{
- GUARD_AS_RESULT(s2n_drbg_bytes_used(&per_thread_private_drbg, bytes_used));
+ RESULT_GUARD(s2n_drbg_bytes_used(&per_thread_private_drbg, bytes_used));
return S2N_RESULT_OK;
}
static int s2n_rand_urandom_impl(void *ptr, uint32_t size)
{
- ENSURE_POSIX(entropy_fd != UNINITIALIZED_ENTROPY_FD, S2N_ERR_NOT_INITIALIZED);
+ POSIX_ENSURE(entropy_fd != UNINITIALIZED_ENTROPY_FD, S2N_ERR_NOT_INITIALIZED);
uint8_t *data = ptr;
uint32_t n = size;
@@ -260,11 +264,11 @@ S2N_RESULT s2n_public_random(int64_t bound, uint64_t *output)
{
uint64_t r;
- ENSURE_GT(bound, 0);
+ RESULT_ENSURE_GT(bound, 0);
while (1) {
struct s2n_blob blob = {.data = (void *)&r, sizeof(r) };
- GUARD_RESULT(s2n_get_public_random_data(&blob));
+ RESULT_GUARD(s2n_get_public_random_data(&blob));
/* Imagine an int was one byte and UINT_MAX was 256. If the
* caller asked for s2n_random(129, ...) we'd end up in
@@ -288,6 +292,8 @@ S2N_RESULT s2n_public_random(int64_t bound, uint64_t *output)
#if S2N_LIBCRYPTO_SUPPORTS_CUSTOM_RAND
+#define S2N_RAND_ENGINE_ID "s2n_rand"
+
int s2n_openssl_compat_rand(unsigned char *buf, int num)
{
struct s2n_blob out = {.data = buf,.size = num };
@@ -326,7 +332,7 @@ static int s2n_rand_init_impl(void)
if (errno == EINTR) {
goto OPEN;
}
- S2N_ERROR(S2N_ERR_OPEN_RANDOM);
+ POSIX_BAIL(S2N_ERR_OPEN_RANDOM);
}
if (s2n_cpu_supports_rdrand()) {
@@ -340,51 +346,51 @@ S2N_RESULT s2n_rand_init(void)
{
uint32_t pagesize;
- GUARD_AS_RESULT(s2n_rand_init_cb());
+ RESULT_GUARD_POSIX(s2n_rand_init_cb());
pagesize = s2n_mem_get_page_size();
/* We need a single-aligned page for our protected memory region */
- ENSURE(posix_memalign(&zeroed_when_forked_page, pagesize, pagesize) == S2N_SUCCESS, S2N_ERR_OPEN_RANDOM);
- ENSURE(zeroed_when_forked_page != NULL, S2N_ERR_OPEN_RANDOM);
+ RESULT_ENSURE(posix_memalign(&zeroed_when_forked_page, pagesize, pagesize) == S2N_SUCCESS, S2N_ERR_OPEN_RANDOM);
+ RESULT_ENSURE(zeroed_when_forked_page != NULL, S2N_ERR_OPEN_RANDOM);
/* Initialized to zero to ensure that we seed our DRBGs */
zero_if_forked = 0;
/* INHERIT_ZERO and WIPEONFORK reset a page to all-zeroes when a fork occurs */
#if defined(MAP_INHERIT_ZERO)
- ENSURE(minherit(zeroed_when_forked_page, pagesize, MAP_INHERIT_ZERO) != S2N_FAILURE, S2N_ERR_OPEN_RANDOM);
+ RESULT_ENSURE(minherit(zeroed_when_forked_page, pagesize, MAP_INHERIT_ZERO) != S2N_FAILURE, S2N_ERR_OPEN_RANDOM);
#endif
#if defined(MADV_WIPEONFORK)
- ENSURE(madvise(zeroed_when_forked_page, pagesize, MADV_WIPEONFORK) == S2N_SUCCESS, S2N_ERR_OPEN_RANDOM);
+ RESULT_ENSURE(madvise(zeroed_when_forked_page, pagesize, MADV_WIPEONFORK) == S2N_SUCCESS, S2N_ERR_OPEN_RANDOM);
#endif
/* For defence in depth */
- ENSURE(pthread_atfork(NULL, NULL, s2n_on_fork) == S2N_SUCCESS, S2N_ERR_OPEN_RANDOM);
+ RESULT_ENSURE(pthread_atfork(NULL, NULL, s2n_on_fork) == S2N_SUCCESS, S2N_ERR_OPEN_RANDOM);
/* Seed everything */
- GUARD_RESULT(s2n_defend_if_forked());
+ RESULT_GUARD(s2n_defend_if_forked());
#if S2N_LIBCRYPTO_SUPPORTS_CUSTOM_RAND
/* Create an engine */
ENGINE *e = ENGINE_new();
- ENSURE(e != NULL, S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_set_id(e, "s2n_rand"), S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_set_name(e, "s2n entropy generator"), S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_set_flags(e, ENGINE_FLAGS_NO_REGISTER_ALL), S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_set_init_function(e, s2n_openssl_compat_init), S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_set_RAND(e, &s2n_openssl_rand_method), S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_add(e), S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_free(e) , S2N_ERR_OPEN_RANDOM);
+ RESULT_ENSURE(e != NULL, S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_set_id(e, S2N_RAND_ENGINE_ID), S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_set_name(e, "s2n entropy generator"), S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_set_flags(e, ENGINE_FLAGS_NO_REGISTER_ALL), S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_set_init_function(e, s2n_openssl_compat_init), S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_set_RAND(e, &s2n_openssl_rand_method), S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_add(e), S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_free(e) , S2N_ERR_OPEN_RANDOM);
/* Use that engine for rand() */
- e = ENGINE_by_id("s2n_rand");
- ENSURE(e != NULL, S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_init(e), S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_set_default(e, ENGINE_METHOD_RAND), S2N_ERR_OPEN_RANDOM);
- GUARD_RESULT_OSSL(ENGINE_free(e), S2N_ERR_OPEN_RANDOM);
+ e = ENGINE_by_id(S2N_RAND_ENGINE_ID);
+ RESULT_ENSURE(e != NULL, S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_init(e), S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_set_default(e, ENGINE_METHOD_RAND), S2N_ERR_OPEN_RANDOM);
+ RESULT_GUARD_OSSL(ENGINE_free(e), S2N_ERR_OPEN_RANDOM);
#endif
return S2N_RESULT_OK;
@@ -392,9 +398,9 @@ S2N_RESULT s2n_rand_init(void)
static int s2n_rand_cleanup_impl(void)
{
- ENSURE_POSIX(entropy_fd != UNINITIALIZED_ENTROPY_FD, S2N_ERR_NOT_INITIALIZED);
+ POSIX_ENSURE(entropy_fd != UNINITIALIZED_ENTROPY_FD, S2N_ERR_NOT_INITIALIZED);
- GUARD(close(entropy_fd));
+ POSIX_GUARD(close(entropy_fd));
entropy_fd = UNINITIALIZED_ENTROPY_FD;
return S2N_SUCCESS;
@@ -402,18 +408,27 @@ static int s2n_rand_cleanup_impl(void)
S2N_RESULT s2n_rand_cleanup(void)
{
- GUARD_AS_RESULT(s2n_rand_cleanup_cb());
+ RESULT_GUARD_POSIX(s2n_rand_cleanup_cb());
#if S2N_LIBCRYPTO_SUPPORTS_CUSTOM_RAND
/* Cleanup our rand ENGINE in libcrypto */
- ENGINE *rand_engine = ENGINE_by_id("s2n_rand");
+ ENGINE *rand_engine = ENGINE_by_id(S2N_RAND_ENGINE_ID);
if (rand_engine) {
+ ENGINE_remove(rand_engine);
ENGINE_finish(rand_engine);
ENGINE_free(rand_engine);
ENGINE_cleanup();
+ RAND_set_rand_engine(NULL);
+ RAND_set_rand_method(NULL);
}
#endif
+ if (zeroed_when_forked_page != NULL) {
+ free(zeroed_when_forked_page);
+ zeroed_when_forked_page = NULL;
+ zero_if_forked_ptr = &zero;
+ }
+
s2n_rand_init_cb = s2n_rand_init_impl;
s2n_rand_cleanup_cb = s2n_rand_cleanup_impl;
s2n_rand_seed_cb = s2n_rand_urandom_impl;
@@ -424,8 +439,8 @@ S2N_RESULT s2n_rand_cleanup(void)
S2N_RESULT s2n_rand_cleanup_thread(void)
{
- GUARD_AS_RESULT(s2n_drbg_wipe(&per_thread_private_drbg));
- GUARD_AS_RESULT(s2n_drbg_wipe(&per_thread_public_drbg));
+ RESULT_GUARD(s2n_drbg_wipe(&per_thread_private_drbg));
+ RESULT_GUARD(s2n_drbg_wipe(&per_thread_public_drbg));
return S2N_RESULT_OK;
}
@@ -436,8 +451,8 @@ S2N_RESULT s2n_rand_cleanup_thread(void)
*/
S2N_RESULT s2n_set_private_drbg_for_test(struct s2n_drbg drbg)
{
- ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
- GUARD_AS_RESULT(s2n_drbg_wipe(&per_thread_private_drbg));
+ RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST);
+ RESULT_GUARD(s2n_drbg_wipe(&per_thread_private_drbg));
per_thread_private_drbg = drbg;
return S2N_RESULT_OK;
@@ -465,7 +480,7 @@ static int s2n_rand_rdrand_impl(void *data, uint32_t size)
uint8_t u8[8];
} output;
- GUARD(s2n_stuffer_init(&stuffer, &out));
+ POSIX_GUARD(s2n_stuffer_init(&stuffer, &out));
while ((space_remaining = s2n_stuffer_space_remaining(&stuffer))) {
unsigned char success = 0;
output.u64 = 0;
@@ -534,15 +549,15 @@ static int s2n_rand_rdrand_impl(void *data, uint32_t size)
}
}
- ENSURE_POSIX(success, S2N_ERR_RDRAND_FAILED);
+ POSIX_ENSURE(success, S2N_ERR_RDRAND_FAILED);
int data_to_fill = MIN(sizeof(output), space_remaining);
- GUARD_POSIX(s2n_stuffer_write_bytes(&stuffer, output.u8, data_to_fill));
+ POSIX_GUARD(s2n_stuffer_write_bytes(&stuffer, output.u8, data_to_fill));
}
return S2N_SUCCESS;
#else
- BAIL_POSIX(S2N_ERR_UNSUPPORTED_CPU);
+ POSIX_BAIL(S2N_ERR_UNSUPPORTED_CPU);
#endif
}
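The fork defence above relies on a sentinel page that the kernel wipes to zero in the child, which forces the child to re-instantiate its DRBGs. A standalone sketch of just that mechanism, assuming a Linux kernel with MADV_WIPEONFORK; this is illustrative and not the library's code:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        size_t pagesize = (size_t) sysconf(_SC_PAGESIZE);
        void *page = NULL;

        /* A single aligned page acts as the "was there a fork?" sentinel. */
        if (posix_memalign(&page, pagesize, pagesize) != 0) { return 1; }
        memset(page, 0xFF, pagesize); /* non-zero means "no fork seen yet" */

    #ifdef MADV_WIPEONFORK
        /* After fork(), the child observes this page as all zeroes. */
        if (madvise(page, pagesize, MADV_WIPEONFORK) != 0) { return 1; }
    #endif

        pid_t pid = fork();
        if (pid == 0) {
            /* Zero here tells the child it must reseed its DRBG state. */
            printf("child sentinel: %u\n", (unsigned) ((unsigned char *) page)[0]);
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        printf("parent sentinel: %u\n", (unsigned) ((unsigned char *) page)[0]);
        free(page);
        return 0;
    }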
diff --git a/contrib/restricted/aws/s2n/utils/s2n_result.c b/contrib/restricted/aws/s2n/utils/s2n_result.c
index 022fcc3711..a270f6ec1a 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_result.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_result.c
@@ -27,7 +27,7 @@
*
* ```c
* uint8_t s2n_answer_to_the_ultimate_question() {
- * GUARD(s2n_sleep_for_years(7500000));
+ * POSIX_GUARD(s2n_sleep_for_years(7500000));
* return 42;
* }
* ```
@@ -43,7 +43,7 @@
*
* ```c
* int s2n_deep_thought() {
- * GUARD(s2n_answer_to_the_ultimate_question());
+ * POSIX_GUARD(s2n_answer_to_the_ultimate_question());
* return 0;
* }
* ```
@@ -76,7 +76,7 @@
* `warn_unused_result` attribute, which ensures they are GUARDed.
*/
-#include <s2n.h>
+#include "api/s2n.h"
#include <stdbool.h>
#include "utils/s2n_result.h"
@@ -91,3 +91,9 @@ inline bool s2n_result_is_error(s2n_result result)
{
return result.__error_signal == S2N_FAILURE;
}
+
+/* ignores the returned result of a function */
+inline void s2n_result_ignore(s2n_result result)
+{
+ /* noop */
+}
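A short sketch of how callers consume the s2n_result type and the new s2n_result_ignore(); s2n_example_step() and s2n_example_caller() are hypothetical, and only s2n_result_ignore() and the RESULT_* macros introduced in this change set are assumed.

    #include "utils/s2n_result.h"
    #include "utils/s2n_safety.h"

    /* Hypothetical fallible operation using the s2n_result convention. */
    static S2N_RESULT s2n_example_step(int input)
    {
        RESULT_ENSURE_GT(input, 0);
        return S2N_RESULT_OK;
    }

    static S2N_RESULT s2n_example_caller(void)
    {
        /* Propagate failure to our own caller... */
        RESULT_GUARD(s2n_example_step(1));

        /* ...or explicitly discard a result when an error cannot affect state. */
        s2n_result_ignore(s2n_example_step(0));
        return S2N_RESULT_OK;
    }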
diff --git a/contrib/restricted/aws/s2n/utils/s2n_result.h b/contrib/restricted/aws/s2n/utils/s2n_result.h
index c135c55074..d16635429b 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_result.h
+++ b/contrib/restricted/aws/s2n/utils/s2n_result.h
@@ -15,7 +15,7 @@
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include <stdbool.h>
/* A value which indicates the outcome of a function */
@@ -41,6 +41,16 @@ S2N_RESULT_MUST_USE bool s2n_result_is_ok(s2n_result result);
/* returns true when the result is S2N_RESULT_ERROR */
S2N_RESULT_MUST_USE bool s2n_result_is_error(s2n_result result);
+/**
+ * Ignores the returned result of a function
+ *
+ * Generally, function results should always be checked. Using this function
+ * could cause the system to behave in unexpected ways. As such, this function
+ * should only be used in scenarios where the system state is not affected by
+ * errors.
+ */
+void s2n_result_ignore(s2n_result result);
+
/* used in function declarations to signal function fallibility */
#define S2N_RESULT S2N_RESULT_MUST_USE s2n_result
@@ -48,6 +58,3 @@ S2N_RESULT_MUST_USE bool s2n_result_is_error(s2n_result result);
* We need a version of s2n_result which can be ignored.
*/
#define S2N_CLEANUP_RESULT s2n_result
-
-/* converts the S2N_RESULT into posix error codes */
-#define S2N_RESULT_TO_POSIX( x ) (s2n_result_is_ok(x) ? S2N_SUCCESS : S2N_FAILURE)
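With S2N_RESULT_TO_POSIX removed, code that must expose the legacy int convention now converts through the guard macros instead. A minimal sketch (the example functions are hypothetical; POSIX_GUARD_RESULT is defined in the new s2n_safety_macros.h later in this diff):

    #include "utils/s2n_result.h"
    #include "utils/s2n_safety.h"

    static S2N_RESULT s2n_example_result_fn(void)
    {
        return S2N_RESULT_OK;
    }

    /* POSIX-style wrapper: S2N_SUCCESS on success, S2N_FAILURE on error. */
    int s2n_example_posix_wrapper(void)
    {
        /* Converts an s2n_result failure into a -1 return. */
        POSIX_GUARD_RESULT(s2n_example_result_fn());
        return S2N_SUCCESS;
    }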
diff --git a/contrib/restricted/aws/s2n/utils/s2n_rfc5952.c b/contrib/restricted/aws/s2n/utils/s2n_rfc5952.c
index cab0464a6f..ef49e4aa7c 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_rfc5952.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_rfc5952.c
@@ -17,7 +17,7 @@
#include <sys/socket.h>
#include <stdio.h>
-#include <error/s2n_errno.h>
+#include "error/s2n_errno.h"
#include "utils/s2n_rfc5952.h"
#include "utils/s2n_safety.h"
@@ -31,7 +31,7 @@ S2N_RESULT s2n_inet_ntop(int af, const void *addr, struct s2n_blob *dst)
uint8_t *cursor = dst->data;
if (af == AF_INET) {
- ENSURE(dst->size >= sizeof("111.222.333.444"), S2N_ERR_SIZE_MISMATCH);
+ RESULT_ENSURE(dst->size >= sizeof("111.222.333.444"), S2N_ERR_SIZE_MISMATCH);
for (int i = 0; i < 4; i++) {
if (bytes[i] / 100) {
@@ -50,7 +50,7 @@ S2N_RESULT s2n_inet_ntop(int af, const void *addr, struct s2n_blob *dst)
}
if (af == AF_INET6) {
- ENSURE(dst->size >= sizeof("1111:2222:3333:4444:5555:6666:7777:8888"), S2N_ERR_SIZE_MISMATCH);
+ RESULT_ENSURE(dst->size >= sizeof("1111:2222:3333:4444:5555:6666:7777:8888"), S2N_ERR_SIZE_MISMATCH);
/* See Section 4 of RFC5952 for the rules we are going to follow here
*
@@ -131,5 +131,5 @@ S2N_RESULT s2n_inet_ntop(int af, const void *addr, struct s2n_blob *dst)
return S2N_RESULT_OK;
}
- BAIL(S2N_ERR_INVALID_ARGUMENT);
+ RESULT_BAIL(S2N_ERR_INVALID_ARGUMENT);
}
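A hedged usage sketch for s2n_inet_ntop() as declared above; the address bytes are illustrative, and the destination buffer is zero-filled so it can be printed as a C string.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #include "utils/s2n_blob.h"
    #include "utils/s2n_result.h"
    #include "utils/s2n_rfc5952.h"

    int main(void)
    {
        uint8_t addr[4] = { 192, 0, 2, 1 }; /* 192.0.2.1, a documentation address */
        uint8_t buf[sizeof("111.222.333.444")] = { 0 };
        struct s2n_blob dst = { .data = buf, .size = sizeof(buf) };

        if (s2n_result_is_ok(s2n_inet_ntop(AF_INET, addr, &dst))) {
            printf("%s\n", (const char *) buf); /* expected to print "192.0.2.1" */
        }
        return 0;
    }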
diff --git a/contrib/restricted/aws/s2n/utils/s2n_safety.c b/contrib/restricted/aws/s2n/utils/s2n_safety.c
index 13c741b3ec..b26e2d9c41 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_safety.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_safety.c
@@ -13,12 +13,6 @@
* permissions and limitations under the License.
*/
-#define _GNU_SOURCE /* For syscall on Linux */
-#undef _POSIX_C_SOURCE /* For syscall() on Mac OS X */
-
-#include <unistd.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>
@@ -26,22 +20,6 @@
#include "utils/s2n_safety.h"
/**
- * Get the process id
- *
- * Returns:
- * The process ID of the current process
- */
-pid_t s2n_actual_getpid()
-{
-#if defined(__GNUC__) && defined(SYS_getpid)
- /* http://yarchive.net/comp/linux/getpid_caching.html */
- return (pid_t) syscall(SYS_getpid);
-#else
- return getpid();
-#endif
-}
-
-/**
* Given arrays "a" and "b" of length "len", determine whether they
* hold equal contents.
*
@@ -55,27 +33,48 @@ pid_t s2n_actual_getpid()
* Returns:
* Whether all bytes in arrays "a" and "b" are identical
*/
-bool s2n_constant_time_equals(const uint8_t * a, const uint8_t * b, const uint32_t len)
+bool s2n_constant_time_equals(const uint8_t *a, const uint8_t *b, const uint32_t len)
{
S2N_PUBLIC_INPUT(a);
S2N_PUBLIC_INPUT(b);
S2N_PUBLIC_INPUT(len);
- ENSURE_POSIX((a == NULL) || S2N_MEM_IS_READABLE(a, len), S2N_ERR_SAFETY);
- ENSURE_POSIX((b == NULL) || S2N_MEM_IS_READABLE(b, len), S2N_ERR_SAFETY);
- if (len != 0 && (a == NULL || b == NULL)) {
- return false;
+ /* if len is 0, they're always going to be equal */
+ if (len == 0) {
+ return true;
}
- uint8_t xor = 0;
- for (int i = 0; i < len; i++) {
+ /* check if a and b are readable - if so, allow them to increment their pointer */
+ uint8_t a_inc = S2N_MEM_IS_READABLE(a, len) ? 1 : 0;
+ uint8_t b_inc = S2N_MEM_IS_READABLE(b, len) ? 1 : 0;
+
+ /* reserve a stand-in pointer to replace NULL pointers */
+ static uint8_t standin = 0;
+
+ /* if the pointers can increment their values, then use the
+ * original value; otherwise use the stand-in */
+ const uint8_t *a_ptr = a_inc ? a : &standin;
+ const uint8_t *b_ptr = b_inc ? b : &standin;
+
+ /* start by assuming they are equal only if both increment their pointer */
+ uint8_t xor = !((a_inc == 1) & (b_inc == 1));
+
+ /* iterate over each byte in the slices */
+ for (uint32_t i = 0; i < len; i++) {
/* Invariants must hold for each execution of the loop
- * and at loop exit, hence the <= */
+ * and at loop exit, hence the <= */
S2N_INVARIANT(i <= len);
- xor |= a[i] ^ b[i];
+
+ /* mix the current cursor values into the result */
+ xor |= *a_ptr ^ *b_ptr;
+
+ /* increment the pointers by their "inc" values */
+ a_ptr += a_inc;
+ b_ptr += b_inc;
}
- return !xor;
+ /* finally check to make sure xor is still 0 */
+ return (xor == 0);
}
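The rewrite avoids data-dependent branches in the byte loop even for NULL inputs: every byte position is visited exactly once and mismatches only accumulate into `xor`, so timing does not reveal where two secrets diverge. A caller-side sketch (the MAC verifier is hypothetical):

    #include <stdint.h>

    #include "utils/s2n_safety.h"

    /* Hypothetical verifier: compare a received MAC against the computed one
     * without leaking the position of the first mismatching byte. */
    static int s2n_example_verify_mac(const uint8_t *computed, const uint8_t *received, uint32_t len)
    {
        POSIX_ENSURE(s2n_constant_time_equals(computed, received, len), S2N_ERR_SAFETY);
        return S2N_SUCCESS;
    }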
/**
@@ -94,16 +93,7 @@ int s2n_constant_time_copy_or_dont(uint8_t * dest, const uint8_t * src, uint32_t
S2N_PUBLIC_INPUT(src);
S2N_PUBLIC_INPUT(len);
-/* This underflows a value of 0 to the maximum value via arithmetic underflow,
- * so the check for arithmetic overflow/underflow needs to be disabled for CBMC.
- * Additionally, uint_fast16_t is defined as the fastest available unsigned
- * integer with 16 bits or greater, and is not guaranteed to be 16 bits long.
- * To handle this, the conversion overflow check also needs to be enabled. */
-#pragma CPROVER check push
-#pragma CPROVER check disable "conversion"
-#pragma CPROVER check disable "unsigned-overflow"
- uint8_t mask = ((uint_fast16_t)((uint_fast16_t)(dont) - 1)) >> 8;
-#pragma CPROVER check pop
+ uint8_t mask = (((0xFFFF & dont) - 1) >> 8) & 0xFF;
/* dont = 0 : mask = 0xff */
/* dont > 0 : mask = 0x00 */
@@ -147,33 +137,19 @@ int s2n_constant_time_pkcs1_unpad_or_dont(uint8_t * dst, const uint8_t * src, ui
dont_copy |= src[0] ^ 0x00;
dont_copy |= src[1] ^ 0x02;
+ dont_copy |= *(start_of_data-1) ^ 0x00;
-/* Since -1 is being used, we need to disable the pointer overflow check for CBMC. */
-#pragma CPROVER check push
-#pragma CPROVER check disable "pointer-overflow"
- dont_copy |= start_of_data[-1] ^ 0x00;
-#pragma CPROVER check pop
-
-/* This underflows a value of 0 to the maximum value via arithmetic underflow,
- * so the check for arithmetic overflow/underflow needs to be disabled for CBMC.
- * Additionally, uint_fast16_t is defined as the fastest available unsigned
- * integer with 16 bits or greater, and is not guaranteed to be 16 bits long.
- * To handle this, the conversion overflow check also needs to be enabled. */
-#pragma CPROVER check push
-#pragma CPROVER check disable "conversion"
-#pragma CPROVER check disable "unsigned-overflow"
for (uint32_t i = 2; i < srclen - expectlen - 1; i++) {
/* Note! We avoid using logical NOT (!) here; while in practice
* many compilers will use constant-time sequences for this operator,
* at least on x86 (e.g. cmp -> setcc, or vectorized pcmpeq), this is
* not guaranteed to hold, and some architectures might not have a
* convenient mechanism for generating a branchless logical not. */
- uint8_t mask = ((uint_fast16_t)((uint_fast16_t)(src[i]) - 1)) >> 8;
+ uint8_t mask = (((0xFFFF & src[i]) - 1) >> 8) & 0xFF;
/* src[i] = 0 : mask = 0xff */
/* src[i] > 0 : mask = 0x00 */
dont_copy |= mask;
}
-#pragma CPROVER check pop
s2n_constant_time_copy_or_dont(dst, start_of_data, expectlen, dont_copy);
@@ -195,8 +171,8 @@ int s2n_in_unit_test_set(bool newval)
int s2n_align_to(uint32_t initial, uint32_t alignment, uint32_t* out)
{
- notnull_check(out);
- ENSURE_POSIX(alignment != 0, S2N_ERR_SAFETY);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE(alignment != 0, S2N_ERR_SAFETY);
if (initial == 0) {
*out = 0;
return S2N_SUCCESS;
@@ -204,33 +180,33 @@ int s2n_align_to(uint32_t initial, uint32_t alignment, uint32_t* out)
const uint64_t i = initial;
const uint64_t a = alignment;
const uint64_t result = a * (((i - 1) / a) + 1);
- S2N_ERROR_IF(result > UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(result <= UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
*out = (uint32_t) result;
return S2N_SUCCESS;
}
int s2n_mul_overflow(uint32_t a, uint32_t b, uint32_t* out)
{
- notnull_check(out);
+ POSIX_ENSURE_REF(out);
const uint64_t result = ((uint64_t) a) * ((uint64_t) b);
- S2N_ERROR_IF(result > UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(result <= UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
*out = (uint32_t) result;
return S2N_SUCCESS;
}
int s2n_add_overflow(uint32_t a, uint32_t b, uint32_t* out)
{
- notnull_check(out);
+ POSIX_ENSURE_REF(out);
uint64_t result = ((uint64_t) a) + ((uint64_t) b);
- S2N_ERROR_IF(result > UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE(result <= UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW);
*out = (uint32_t) result;
return S2N_SUCCESS;
}
int s2n_sub_overflow(uint32_t a, uint32_t b, uint32_t* out)
{
- notnull_check(out);
- S2N_ERROR_IF(a < b, S2N_ERR_INTEGER_OVERFLOW);
+ POSIX_ENSURE_REF(out);
+ POSIX_ENSURE(a >= b, S2N_ERR_INTEGER_OVERFLOW);
*out = a - b;
return S2N_SUCCESS;
}
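The overflow helpers above give callers checked 32-bit arithmetic in place of silent wrap-around; a brief sketch of a length computation built on them (the framing helper is hypothetical):

    #include <stdint.h>

    #include "utils/s2n_safety.h"

    /* Hypothetical framing helper: total = header + payload, rejecting overflow. */
    static int s2n_example_total_length(uint32_t header_len, uint32_t payload_len, uint32_t *total)
    {
        POSIX_ENSURE_REF(total);

        /* Fails with S2N_ERR_INTEGER_OVERFLOW instead of wrapping modulo 2^32. */
        POSIX_GUARD(s2n_add_overflow(header_len, payload_len, total));
        return S2N_SUCCESS;
    }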
diff --git a/contrib/restricted/aws/s2n/utils/s2n_safety.h b/contrib/restricted/aws/s2n/utils/s2n_safety.h
index 67a0328901..438ee2c9d6 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_safety.h
+++ b/contrib/restricted/aws/s2n/utils/s2n_safety.h
@@ -16,7 +16,6 @@
#pragma once
#include <string.h>
-#include <sys/types.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
@@ -24,9 +23,7 @@
#include "error/s2n_errno.h"
#include "utils/s2n_ensure.h"
#include "utils/s2n_result.h"
-
-/* Success signal value for OpenSSL functions */
-#define _OSSL_SUCCESS 1
+#include "utils/s2n_safety_macros.h"
/**
* The goal of s2n_safety is to provide helpers to perform common
@@ -34,279 +31,9 @@
*/
/**
- * Sets the global `errno` and returns with a `S2N_RESULT_ERROR`
- */
-#define BAIL( x ) do { _S2N_ERROR( ( x ) ); return S2N_RESULT_ERROR; } while (0)
-
-/**
- * Sets the global `errno` and returns with a POSIX error (`-1`)
- */
-#define BAIL_POSIX( x ) do { _S2N_ERROR( ( x ) ); return S2N_FAILURE; } while (0)
-
-/**
- * Sets the global `errno` and returns with a `NULL` pointer value
- */
-#define BAIL_PTR( x ) do { _S2N_ERROR( ( x ) ); return NULL; } while (0)
-
-/**
- * Ensures the `condition` is `true`, otherwise the function will `BAIL` with an `error`
- */
-#define ENSURE( condition , error ) __S2N_ENSURE((condition), BAIL(error))
-
-/**
- * Ensures the `result` is OK, otherwise the function will `BAIL` with an `error`
- */
-#define ENSURE_OK( result , error ) __S2N_ENSURE(s2n_result_is_ok(result), BAIL(error))
-
-/**
- * Ensures `n` is greater than or equal to `min`, otherwise the function will `BAIL` with a `S2N_ERR_SAFETY` error
- */
-#define ENSURE_GTE( n , min ) ENSURE((n) >= (min), S2N_ERR_SAFETY)
-
-/**
- * Ensures `n` is less than or equal to `max`, otherwise the function will `BAIL` with a `S2N_ERR_SAFETY` error
- */
-#define ENSURE_LTE( n , max ) ENSURE((n) <= (max), S2N_ERR_SAFETY)
-
-/**
- * Ensures `n` is greater than `min`, otherwise the function will `BAIL` with a `S2N_ERR_SAFETY` error
- */
-#define ENSURE_GT( n , min ) ENSURE((n) > (min), S2N_ERR_SAFETY)
-
-/**
- * Ensures `n` is less than `min`, otherwise the function will `BAIL` with a `S2N_ERR_SAFETY` error
- */
-#define ENSURE_LT( n , max ) ENSURE((n) < (max), S2N_ERR_SAFETY)
-
-/**
- * Ensures `a` is equal to `b`, otherwise the function will `BAIL` with a `S2N_ERR_SAFETY` error
- */
-#define ENSURE_EQ( a , b ) ENSURE((a) == (b), S2N_ERR_SAFETY)
-
-/**
- * Ensures `a` is not equal to `b`, otherwise the function will `BAIL` with a `S2N_ERR_SAFETY` error
- */
-#define ENSURE_NE( a , b ) ENSURE((a) != (b), S2N_ERR_SAFETY)
-
-/**
- * Ensures the `condition` is `true`, otherwise the function will `BAIL_POSIX` with an `error`
- */
-#define ENSURE_POSIX( condition , error ) __S2N_ENSURE((condition), BAIL_POSIX(error))
-
-/**
- * Ensures the `condition` is `true`, otherwise the function will `BAIL_PTR` with an `error`
- */
-#define ENSURE_PTR( condition , error ) __S2N_ENSURE((condition), BAIL_PTR(error))
-
-/**
- * Ensures `x` is not `NULL`, otherwise the function will `BAIL_PTR` with an `error`
- */
-#define ENSURE_REF_PTR( x ) ENSURE_PTR(S2N_OBJECT_PTR_IS_READABLE(x), S2N_ERR_NULL)
-
-/**
- * Ensures `x` is a readable reference, otherwise the function will `BAIL` with `S2N_ERR_NULL`
- */
-#define ENSURE_REF( x ) ENSURE(S2N_OBJECT_PTR_IS_READABLE(x), S2N_ERR_NULL)
-
-/**
- * Ensures `x` is a readable reference, otherwise the function will `BAIL_POSIX` with `S2N_ERR_NULL`
- */
-#define ENSURE_POSIX_REF( x ) ENSURE_POSIX(S2N_OBJECT_PTR_IS_READABLE(x), S2N_ERR_NULL)
-
-/**
- * Ensures `x` is a mutable reference, otherwise the function will `BAIL` with `S2N_ERR_NULL`
- */
-#define ENSURE_MUT( x ) ENSURE(S2N_OBJECT_PTR_IS_WRITABLE(x), S2N_ERR_NULL)
-
-/**
- * Ensures `x` is a mutable reference, otherwise the function will `BAIL_POSIX` with `S2N_ERR_NULL`
- */
-#define ENSURE_POSIX_MUT( x ) ENSURE_POSIX(S2N_OBJECT_PTR_IS_WRITABLE(x), S2N_ERR_NULL)
-
-/**
- * Ensures `min <= n <= max`
- */
-#define ENSURE_INCLUSIVE_RANGE( min , n , max ) \
- do { \
- __typeof( n ) __tmp_n = ( n ); \
- ENSURE_GTE(__tmp_n, min); \
- ENSURE_LTE(__tmp_n, max); \
- } while(0)
-
-/**
- * Ensures `min < n < max`
- */
-#define ENSURE_EXCLUSIVE_RANGE( min , n , max ) \
- do { \
- __typeof( n ) __tmp_n = ( n ); \
- ENSURE_GT(__tmp_n, min); \
- ENSURE_LT(__tmp_n, max); \
- } while(0)
-
-/**
- * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
- */
-#define PRECONDITION( result ) GUARD_RESULT(__S2N_ENSURE_PRECONDITION(result))
-
-/**
- * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
- */
-#define POSTCONDITION( result ) GUARD_RESULT(__S2N_ENSURE_POSTCONDITION(result))
-
-/**
- * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
- */
-#define PRECONDITION_POSIX( result ) GUARD_AS_POSIX(__S2N_ENSURE_PRECONDITION(result))
-
-/**
- * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
- */
-#define POSTCONDITION_POSIX( result ) GUARD_AS_POSIX(__S2N_ENSURE_POSTCONDITION(result))
-
-/**
- * Ensures the `condition` is `true`, otherwise the function will `BAIL` with an `error`.
- * When the code is built in debug mode, they are checked.
- * When the code is built in production mode, they are ignored.
- */
-#define DEBUG_ENSURE( condition, error ) __S2N_ENSURE_DEBUG((condition), BAIL(error))
-
-/**
- * Ensures the `condition` is `true`, otherwise the function will `BAIL_POSIX` with an `error`.
- * When the code is built in debug mode, they are checked.
- * When the code is built in production mode, they are ignored.
- */
-#define DEBUG_ENSURE_POSIX( condition, error ) __S2N_ENSURE_DEBUG((condition), BAIL_POSIX(error))
-
-/**
- * Ensures `x` is not an error, otherwise the function will return an error signal
- *
- * Note: this currently accepts POSIX error signals but will transition to accept s2n_result
- */
-#define GUARD( x ) GUARD_POSIX(x)
-
-/**
- * Ensures `x` is not an error, otherwise goto `label`
- *
- * Note: this currently accepts POSIX error signals but will transition to accept s2n_result
- */
-#define GUARD_GOTO( x , label ) GUARD_POSIX_GOTO((x), (label))
-
-/**
- * Ensures `x` is not an error, otherwise the function will return `NULL`
- *
- * Note: this currently accepts POSIX error signals but will transition to accept s2n_result
- */
-#define GUARD_PTR( x ) GUARD_POSIX_PTR(x)
-
-/**
- * Ensures `x` is not `NULL`, otherwise the function will return an error signal
- *
- * Note: this currently accepts POSIX error signals but will transition to accept s2n_result
- */
-#define GUARD_NONNULL( x ) GUARD_POSIX_NONNULL(x)
-
-/**
- * Ensures `x` is not `NULL`, otherwise goto `label`
- *
- * Note: this currently accepts POSIX error signals but will transition to accept s2n_result
- */
-#define GUARD_NONNULL_GOTO( x , label ) __S2N_ENSURE((x) != NULL, goto label)
-
-/**
- * Ensures `x` is not `NULL`, otherwise the function will return `NULL`
- *
- * Note: this currently accepts POSIX error signals but will transition to accept s2n_result
- */
-#define GUARD_NONNULL_PTR( x ) __S2N_ENSURE((x) != NULL, return NULL)
-
-/**
- * Ensures `x` is not a OpenSSL error, otherwise the function will return an error signal
- *
- * Note: this currently accepts POSIX error signals but will transition to accept s2n_result
- */
-#define GUARD_OSSL( x, error ) GUARD_POSIX_OSSL((x), (error))
-
-/**
- * Ensures `x` is ok, otherwise the function will return an `S2N_RESULT_ERROR`
- */
-#define GUARD_RESULT( x ) __S2N_ENSURE(s2n_result_is_ok(x), return S2N_RESULT_ERROR)
-
-/**
- * Ensures `x` is ok, otherwise goto `label`
- */
-#define GUARD_RESULT_GOTO( x, label ) __S2N_ENSURE(s2n_result_is_ok(x), goto label)
-
-/**
- * Ensures `x` is ok, otherwise the function will return `NULL`
- */
-#define GUARD_RESULT_PTR( x ) __S2N_ENSURE(s2n_result_is_ok(x), return NULL)
-
-/**
- * Ensures `x` is not `NULL`, otherwise the function will return an `S2N_RESULT_ERROR`
- */
-#define GUARD_RESULT_NONNULL( x ) __S2N_ENSURE((x) != NULL, return S2N_RESULT_ERROR)
-
-/**
- * Ensures `x` is not a OpenSSL error, otherwise the function will `BAIL` with `error`
- */
-/* TODO: use the OSSL error code in error reporting https://github.com/awslabs/s2n/issues/705 */
-#define GUARD_RESULT_OSSL( x , error ) ENSURE((x) == _OSSL_SUCCESS, error)
-
-/**
- * Ensures `x` is not a POSIX error, otherwise return a POSIX error
- */
-#define GUARD_POSIX( x ) __S2N_ENSURE((x) >= S2N_SUCCESS, return S2N_FAILURE)
-
-/**
- * Ensures `x` is strictly not a POSIX error (`-1`), otherwise goto `label`
- */
-#define GUARD_POSIX_STRICT( x ) __S2N_ENSURE((x) == S2N_SUCCESS, return S2N_FAILURE)
-
-/**
- * Ensures `x` is not a POSIX error, otherwise goto `label`
- */
-#define GUARD_POSIX_GOTO( x , label ) __S2N_ENSURE((x) >= S2N_SUCCESS, goto label)
-
-/**
- * Ensures `x` is not a POSIX error, otherwise the function will return `NULL`
- */
-#define GUARD_POSIX_PTR( x ) __S2N_ENSURE((x) >= S2N_SUCCESS, return NULL)
-
-/**
- * Ensures `x` is not `NULL`, otherwise the function will return a POSIX error (`-1`)
- */
-#define GUARD_POSIX_NONNULL( x ) __S2N_ENSURE((x) != NULL, return S2N_FAILURE)
-
-/**
- * Ensures `x` is not a OpenSSL error, otherwise the function will `BAIL` with `error`
- */
-/* TODO: use the OSSL error code in error reporting https://github.com/awslabs/s2n/issues/705 */
-#define GUARD_POSIX_OSSL( x , error ) ENSURE_POSIX((x) == _OSSL_SUCCESS, error)
-
-/**
- * Ensures `x` is not a POSIX error, otherwise the function will return a `S2N_RESULT_ERROR`
- */
-#define GUARD_AS_RESULT( x ) __S2N_ENSURE((x) >= S2N_SUCCESS, return S2N_RESULT_ERROR)
-
-/**
- * Ensures `x` is OK (S2N_RESULT), otherwise the function will return a POSIX error (`-1`)
- */
-#define GUARD_AS_POSIX( x ) __S2N_ENSURE(s2n_result_is_ok(x), return S2N_FAILURE)
-
-/**
- * Performs a safe memcpy
- */
-#define CHECKED_MEMCPY( d , s , n ) __S2N_ENSURE_SAFE_MEMCPY((d), (s), (n), GUARD_RESULT_NONNULL)
-
-/**
- * Performs a safe memset
- */
-#define CHECKED_MEMSET( d , c , n ) __S2N_ENSURE_SAFE_MEMSET((d), (c), (n), ENSURE_REF)
-
-/**
* Marks a case of a switch statement as able to fall through to the next case
*/
-#if (defined(__clang__) && __clang_major__ >= 10) || (defined(__GNUC__) && __GNUC__ >= 7)
+#if defined(S2N_FALL_THROUGH_SUPPORTED)
# define FALL_THROUGH __attribute__((fallthrough))
#else
# define FALL_THROUGH ((void)0)
@@ -321,14 +48,6 @@ int s2n_in_unit_test_set(bool newval);
#define S2N_IN_INTEG_TEST ( getenv("S2N_INTEG_TEST") != NULL )
#define S2N_IN_TEST ( s2n_in_unit_test() || S2N_IN_INTEG_TEST )
-/**
- * Get the process id
- *
- * Returns:
- * The process ID of the current process
- */
-extern pid_t s2n_actual_getpid();
-
/* Returns 1 if a and b are equal, in constant time */
extern bool s2n_constant_time_equals(const uint8_t * a, const uint8_t * b, const uint32_t len);
@@ -345,6 +64,30 @@ extern int s2n_constant_time_pkcs1_unpad_or_dont(uint8_t * dst, const uint8_t *
*/
#define DEFER_CLEANUP(_thealloc, _thecleanup) \
__attribute__((cleanup(_thecleanup))) _thealloc
+/**
+ * Often we want to free memory on an error, but not on a success.
+ * We do this by declaring a variable with DEFER_CLEANUP, then zeroing
+ * that variable after success to prevent DEFER_CLEANUP from accessing
+ * and freeing any memory it allocated.
+ *
+ * This pattern is not intuitive, so a named macro makes it more readable.
+ */
+#define ZERO_TO_DISABLE_DEFER_CLEANUP(_thealloc) memset(&_thealloc, 0, sizeof(_thealloc))
+
+/* We want to apply blinding whenever `action` fails.
+ * Unfortunately, because functions in S2N do not have a consistent return type, determining failure is difficult.
+ * Instead, let's rely on the consistent error handling behavior of returning from a method early on error
+ * and apply blinding if our tracking variable goes out of scope early.
+ */
+S2N_CLEANUP_RESULT s2n_connection_apply_error_blinding(struct s2n_connection **conn);
+#define WITH_ERROR_BLINDING(conn, action) do { \
+ DEFER_CLEANUP(struct s2n_connection *_conn_to_blind = conn, s2n_connection_apply_error_blinding); \
+ action; \
+ /* The `if` here is to avoid a redundantInitialization warning from cppcheck */ \
+ if (_conn_to_blind) { \
+ _conn_to_blind = NULL; \
+ } \
+} while (0)
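A sketch of the ownership pattern those comments describe: the buffer is released automatically on any early error return, and zeroed out of the cleanup's reach once handed to the caller. The allocator and cleanup function here are hypothetical stand-ins; only DEFER_CLEANUP and ZERO_TO_DISABLE_DEFER_CLEANUP are assumed (WITH_ERROR_BLINDING applies the same early-return idea to connections).

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #include "utils/s2n_safety.h"

    /* DEFER_CLEANUP arranges for &variable to be passed in, so the cleanup
     * takes a pointer to the declared variable. */
    static void s2n_example_free_buffer(uint8_t **buf)
    {
        if (buf != NULL && *buf != NULL) {
            free(*buf);
            *buf = NULL;
        }
    }

    static int s2n_example_make_buffer(uint8_t **out, uint32_t size)
    {
        POSIX_ENSURE_REF(out);

        /* Freed automatically if any later step returns early with an error. */
        DEFER_CLEANUP(uint8_t *scratch = malloc(size), s2n_example_free_buffer);
        POSIX_ENSURE_REF(scratch);
        memset(scratch, 0, size);

        /* Success: transfer ownership, then disarm the deferred cleanup. */
        *out = scratch;
        ZERO_TO_DISABLE_DEFER_CLEANUP(scratch);
        return S2N_SUCCESS;
    }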
/* Creates cleanup function for pointers from function func which accepts a pointer.
* This is useful for DEFER_CLEANUP as it passes &_thealloc into _thecleanup function,
@@ -368,41 +111,3 @@ extern int s2n_mul_overflow(uint32_t a, uint32_t b, uint32_t* out);
extern int s2n_align_to(uint32_t initial, uint32_t alignment, uint32_t* out);
extern int s2n_add_overflow(uint32_t a, uint32_t b, uint32_t* out);
extern int s2n_sub_overflow(uint32_t a, uint32_t b, uint32_t* out);
-/* START COMPATIBILITY LAYER */
-
-/**
- * NOTE: This will be removed once everything is using s2n_result
- */
-
-/* `NULL` check a pointer */
-
-/* Note: this macro is replaced by ENSURE_POSIX_REF */
-#define notnull_check(ptr) ENSURE_POSIX_REF(ptr)
-/* Note: this macro is replaced by ENSURE_REF_PTR */
-#define notnull_check_ptr(ptr) ENSURE_REF_PTR(ptr)
-
-/* Range check a number */
-#define gte_check( n , min ) ENSURE_POSIX((n) >= (min), S2N_ERR_SAFETY)
-#define lte_check( n , max ) ENSURE_POSIX((n) <= (max), S2N_ERR_SAFETY)
-#define gt_check( n , min ) ENSURE_POSIX((n) > (min), S2N_ERR_SAFETY)
-#define lt_check( n , max ) ENSURE_POSIX((n) < (max), S2N_ERR_SAFETY)
-#define eq_check( a , b ) ENSURE_POSIX((a) == (b), S2N_ERR_SAFETY)
-#define ne_check( a , b ) ENSURE_POSIX((a) != (b), S2N_ERR_SAFETY)
-#define inclusive_range_check( low, n, high ) \
- do { \
- __typeof( n ) __tmp_n = ( n ); \
- gte_check(__tmp_n, low); \
- lte_check(__tmp_n, high); \
- } while (0)
-#define exclusive_range_check( low, n, high ) \
- do { \
- __typeof( n ) __tmp_n = ( n ); \
- gt_check(__tmp_n, low); \
- lt_check(__tmp_n, high); \
- } while (0)
-
-#define memcpy_check( d , s , n ) __S2N_ENSURE_SAFE_MEMCPY((d), (s), (n), GUARD_POSIX_NONNULL)
-/* This will fail to build if d is an array. Cast the array to a pointer first! */
-#define memset_check( d , c , n ) __S2N_ENSURE_SAFE_MEMSET((d), (c), (n), ENSURE_POSIX_REF)
-
-/* END COMPATIBILITY LAYER */
diff --git a/contrib/restricted/aws/s2n/utils/s2n_safety_macros.h b/contrib/restricted/aws/s2n/utils/s2n_safety_macros.h
new file mode 100644
index 0000000000..e478724da0
--- /dev/null
+++ b/contrib/restricted/aws/s2n/utils/s2n_safety_macros.h
@@ -0,0 +1,609 @@
+
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * DO NOT DIRECTLY MODIFY THIS FILE:
+ *
+ * The code in this file is generated from scripts/s2n_safety_macros.py and any modifications
+ * should be in there.
+ */
+
+#include "error/s2n_errno.h"
+#include "utils/s2n_ensure.h"
+#include "utils/s2n_result.h"
+
+/**
+ * The goal of s2n_safety is to provide helpers to perform common
+ * checks, which help with code readability.
+ */
+
+/* Success signal value for OpenSSL functions */
+#define _OSSL_SUCCESS 1
+
+/**
+ * Sets the global `s2n_errno` to `error` and returns with an `S2N_RESULT_ERROR`
+ */
+#define RESULT_BAIL(error) do { _S2N_ERROR((error)); return S2N_RESULT_ERROR; } while (0)
+
+/**
+ * Ensures the `condition` is `true`, otherwise the function will `RESULT_BAIL` with `error`
+ */
+#define RESULT_ENSURE(condition, error) __S2N_ENSURE((condition), RESULT_BAIL(error))
+
+/**
+ * Ensures the `condition` is `true`, otherwise the function will `RESULT_BAIL` with `error`
+ *
+ * NOTE: The condition will _only_ be checked when the code is compiled in debug mode.
+ * In release mode, the check is removed.
+ */
+#define RESULT_DEBUG_ENSURE(condition, error) __S2N_ENSURE_DEBUG((condition), RESULT_BAIL(error))
+
+/**
+ * Ensures `s2n_result_is_ok(result)`, otherwise the function will `RESULT_BAIL` with `error`
+ *
+ * This can be useful for overriding the global `s2n_errno`
+ */
+#define RESULT_ENSURE_OK(result, error) __S2N_ENSURE(s2n_result_is_ok(result), RESULT_BAIL(error))
+
+/**
+ * Ensures `a` is greater than or equal to `b`, otherwise the function will `RESULT_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define RESULT_ENSURE_GTE(a, b) __S2N_ENSURE((a) >= (b), RESULT_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * Ensures `a` is less than or equal to `b`, otherwise the function will `RESULT_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define RESULT_ENSURE_LTE(a, b) __S2N_ENSURE((a) <= (b), RESULT_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * Ensures `a` is greater than `b`, otherwise the function will `RESULT_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define RESULT_ENSURE_GT(a, b) __S2N_ENSURE((a) > (b), RESULT_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * Ensures `a` is less than `b`, otherwise the function will `RESULT_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define RESULT_ENSURE_LT(a, b) __S2N_ENSURE((a) < (b), RESULT_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * Ensures `a` is equal to `b`, otherwise the function will `RESULT_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define RESULT_ENSURE_EQ(a, b) __S2N_ENSURE((a) == (b), RESULT_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * Ensures `a` is not equal to `b`, otherwise the function will `RESULT_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define RESULT_ENSURE_NE(a, b) __S2N_ENSURE((a) != (b), RESULT_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * Ensures `min <= n <= max`, otherwise the function will `RESULT_BAIL` with `S2N_ERR_SAFETY`
+ */
+#define RESULT_ENSURE_INCLUSIVE_RANGE(min, n, max) \
+ do { \
+ __typeof(n) __tmp_n = ( n ); \
+ __typeof(n) __tmp_min = ( min ); \
+ __typeof(n) __tmp_max = ( max ); \
+ RESULT_ENSURE_GTE(__tmp_n, __tmp_min); \
+ RESULT_ENSURE_LTE(__tmp_n, __tmp_max); \
+ } while(0)
+
+/**
+ * Ensures `min < n < max`, otherwise the function will `RESULT_BAIL` with `S2N_ERR_SAFETY`
+ */
+#define RESULT_ENSURE_EXCLUSIVE_RANGE(min, n, max) \
+ do { \
+ __typeof(n) __tmp_n = ( n ); \
+ __typeof(n) __tmp_min = ( min ); \
+ __typeof(n) __tmp_max = ( max ); \
+ RESULT_ENSURE_GT(__tmp_n, __tmp_min); \
+ RESULT_ENSURE_LT(__tmp_n, __tmp_max); \
+ } while(0)
+
+/**
+ * Ensures `x` is a readable reference, otherwise the function will `RESULT_BAIL` with `S2N_ERR_NULL`
+ */
+#define RESULT_ENSURE_REF(x) __S2N_ENSURE(S2N_OBJECT_PTR_IS_READABLE(x), RESULT_BAIL(S2N_ERR_NULL))
+
+/**
+ * Ensures `x` is a mutable reference, otherwise the function will `RESULT_BAIL` with `S2N_ERR_NULL`
+ */
+#define RESULT_ENSURE_MUT(x) __S2N_ENSURE(S2N_OBJECT_PTR_IS_WRITABLE(x), RESULT_BAIL(S2N_ERR_NULL))
+
+/**
+ * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
+ *
+ * `RESULT_PRECONDITION` should be used at the beginning of a function to make assertions about
+ * the provided arguments. By default, it is functionally equivalent to `RESULT_GUARD(result)`
+ * but can be altered by a testing environment to provide additional guarantees.
+ */
+#define RESULT_PRECONDITION(result) RESULT_GUARD(__S2N_ENSURE_PRECONDITION((result)))
+
+/**
+ * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
+ *
+ * NOTE: The condition will _only_ be checked when the code is compiled in debug mode.
+ * In release mode, the check is removed.
+ *
+ * `RESULT_POSTCONDITION` should be used at the end of a function to make assertions about
+ * the resulting state. In debug mode, it is functionally equivalent to `RESULT_GUARD(result)`.
+ * In production builds, it becomes a no-op. This can also be altered by a testing environment
+ * to provide additional guarantees.
+ */
+#define RESULT_POSTCONDITION(result) RESULT_GUARD(__S2N_ENSURE_POSTCONDITION((result)))
+
+/**
+ * Performs a safer memcpy.
+ *
+ * The following checks are performed:
+ *
+ * * `destination` is non-null
+ * * `source` is non-null
+ *
+ * Callers will still need to ensure the following:
+ *
+ * * The size of the data pointed to by both the `destination` and `source` parameters,
+ * shall be at least `len` bytes.
+ */
+#define RESULT_CHECKED_MEMCPY(destination, source, len) __S2N_ENSURE_SAFE_MEMCPY((destination), (source), (len), RESULT_GUARD_PTR)
+
+/**
+ * Performs a safer memset
+ *
+ * The following checks are performed:
+ *
+ * * `destination` is non-null
+ *
+ * Callers will still need to ensure the following:
+ *
+ * * The size of the data pointed to by the `destination` parameter shall be at least
+ * `len` bytes.
+ */
+#define RESULT_CHECKED_MEMSET(destination, value, len) __S2N_ENSURE_SAFE_MEMSET((destination), (value), (len), RESULT_ENSURE_REF)
+
+/**
+ * Ensures `s2n_result_is_ok(result)`, otherwise the function will return `S2N_RESULT_ERROR`
+ */
+#define RESULT_GUARD(result) __S2N_ENSURE(s2n_result_is_ok(result), return S2N_RESULT_ERROR)
+
+/**
+ * Ensures `result == _OSSL_SUCCESS`, otherwise the function will `RESULT_BAIL` with `error`
+ */
+#define RESULT_GUARD_OSSL(result, error) __S2N_ENSURE((result) == _OSSL_SUCCESS, RESULT_BAIL(error))
+
+/**
+ * Ensures `(result) >= S2N_SUCCESS`, otherwise the function will return `S2N_RESULT_ERROR`
+ */
+#define RESULT_GUARD_POSIX(result) __S2N_ENSURE((result) >= S2N_SUCCESS, return S2N_RESULT_ERROR)
+
+/**
+ * Ensures `(result) != NULL`, otherwise the function will return `S2N_RESULT_ERROR`
+ *
+ * Does not set s2n_errno to S2N_ERR_NULL, so is NOT a direct replacement for RESULT_ENSURE_REF.
+ */
+#define RESULT_GUARD_PTR(result) __S2N_ENSURE((result) != NULL, return S2N_RESULT_ERROR)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Sets the global `s2n_errno` to `error` and returns with an `S2N_FAILURE`
+ */
+#define POSIX_BAIL(error) do { _S2N_ERROR((error)); return S2N_FAILURE; } while (0)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures the `condition` is `true`, otherwise the function will `POSIX_BAIL` with `error`
+ */
+#define POSIX_ENSURE(condition, error) __S2N_ENSURE((condition), POSIX_BAIL(error))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures the `condition` is `true`, otherwise the function will `POSIX_BAIL` with `error`
+ *
+ * NOTE: The condition will _only_ be checked when the code is compiled in debug mode.
+ * In release mode, the check is removed.
+ */
+#define POSIX_DEBUG_ENSURE(condition, error) __S2N_ENSURE_DEBUG((condition), POSIX_BAIL(error))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `(result) >= S2N_SUCCESS`, otherwise the function will `POSIX_BAIL` with `error`
+ *
+ * This can be useful for overriding the global `s2n_errno`
+ */
+#define POSIX_ENSURE_OK(result, error) __S2N_ENSURE((result) >= S2N_SUCCESS, POSIX_BAIL(error))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is greater than or equal to `b`, otherwise the function will `POSIX_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define POSIX_ENSURE_GTE(a, b) __S2N_ENSURE((a) >= (b), POSIX_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is less than or equal to `b`, otherwise the function will `POSIX_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define POSIX_ENSURE_LTE(a, b) __S2N_ENSURE((a) <= (b), POSIX_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is greater than `b`, otherwise the function will `POSIX_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define POSIX_ENSURE_GT(a, b) __S2N_ENSURE((a) > (b), POSIX_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is less than `b`, otherwise the function will `POSIX_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define POSIX_ENSURE_LT(a, b) __S2N_ENSURE((a) < (b), POSIX_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is equal to `b`, otherwise the function will `POSIX_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define POSIX_ENSURE_EQ(a, b) __S2N_ENSURE((a) == (b), POSIX_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is not equal to `b`, otherwise the function will `POSIX_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define POSIX_ENSURE_NE(a, b) __S2N_ENSURE((a) != (b), POSIX_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `min <= n <= max`, otherwise the function will `POSIX_BAIL` with `S2N_ERR_SAFETY`
+ */
+#define POSIX_ENSURE_INCLUSIVE_RANGE(min, n, max) \
+ do { \
+ __typeof(n) __tmp_n = ( n ); \
+ __typeof(n) __tmp_min = ( min ); \
+ __typeof(n) __tmp_max = ( max ); \
+ POSIX_ENSURE_GTE(__tmp_n, __tmp_min); \
+ POSIX_ENSURE_LTE(__tmp_n, __tmp_max); \
+ } while(0)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `min < n < max`, otherwise the function will `POSIX_BAIL` with `S2N_ERR_SAFETY`
+ */
+#define POSIX_ENSURE_EXCLUSIVE_RANGE(min, n, max) \
+ do { \
+ __typeof(n) __tmp_n = ( n ); \
+ __typeof(n) __tmp_min = ( min ); \
+ __typeof(n) __tmp_max = ( max ); \
+ POSIX_ENSURE_GT(__tmp_n, __tmp_min); \
+ POSIX_ENSURE_LT(__tmp_n, __tmp_max); \
+ } while(0)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `x` is a readable reference, otherwise the function will `POSIX_BAIL` with `S2N_ERR_NULL`
+ */
+#define POSIX_ENSURE_REF(x) __S2N_ENSURE(S2N_OBJECT_PTR_IS_READABLE(x), POSIX_BAIL(S2N_ERR_NULL))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `x` is a mutable reference, otherwise the function will `POSIX_BAIL` with `S2N_ERR_NULL`
+ */
+#define POSIX_ENSURE_MUT(x) __S2N_ENSURE(S2N_OBJECT_PTR_IS_WRITABLE(x), POSIX_BAIL(S2N_ERR_NULL))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
+ *
+ * `POSIX_PRECONDITION` should be used at the beginning of a function to make assertions about
+ * the provided arguments. By default, it is functionally equivalent to `POSIX_GUARD_RESULT(result)`
+ * but can be altered by a testing environment to provide additional guarantees.
+ */
+#define POSIX_PRECONDITION(result) POSIX_GUARD_RESULT(__S2N_ENSURE_PRECONDITION((result)))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
+ *
+ * NOTE: The condition will _only_ be checked when the code is compiled in debug mode.
+ * In release mode, the check is removed.
+ *
+ * `POSIX_POSTCONDITION` should be used at the end of a function to make assertions about
+ * the resulting state. In debug mode, it is functionally equivalent to `POSIX_GUARD_RESULT(result)`.
+ * In production builds, it becomes a no-op. This can also be altered by a testing environment
+ * to provide additional guarantees.
+ */
+#define POSIX_POSTCONDITION(result) POSIX_GUARD_RESULT(__S2N_ENSURE_POSTCONDITION((result)))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Performs a safer memcpy.
+ *
+ * The following checks are performed:
+ *
+ * * `destination` is non-null
+ * * `source` is non-null
+ *
+ * Callers will still need to ensure the following:
+ *
+ * * The size of the data pointed to by both the `destination` and `source` parameters,
+ * shall be at least `len` bytes.
+ */
+#define POSIX_CHECKED_MEMCPY(destination, source, len) __S2N_ENSURE_SAFE_MEMCPY((destination), (source), (len), POSIX_GUARD_PTR)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Performs a safer memset
+ *
+ * The following checks are performed:
+ *
+ * * `destination` is non-null
+ *
+ * Callers will still need to ensure the following:
+ *
+ * * The size of the data pointed to by the `destination` parameter shall be at least
+ * `len` bytes.
+ */
+#define POSIX_CHECKED_MEMSET(destination, value, len) __S2N_ENSURE_SAFE_MEMSET((destination), (value), (len), POSIX_ENSURE_REF)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `(result) >= S2N_SUCCESS`, otherwise the function will return `S2N_FAILURE`
+ */
+#define POSIX_GUARD(result) __S2N_ENSURE((result) >= S2N_SUCCESS, return S2N_FAILURE)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `result == _OSSL_SUCCESS`, otherwise the function will `POSIX_BAIL` with `error`
+ */
+#define POSIX_GUARD_OSSL(result, error) __S2N_ENSURE((result) == _OSSL_SUCCESS, POSIX_BAIL(error))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `s2n_result_is_ok(result)`, otherwise the function will return `S2N_FAILURE`
+ */
+#define POSIX_GUARD_RESULT(result) __S2N_ENSURE(s2n_result_is_ok(result), return S2N_FAILURE)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `(result) != NULL`, otherwise the function will return `S2N_FAILURE`
+ *
+ * Does not set s2n_errno to S2N_ERR_NULL, so is NOT a direct replacement for POSIX_ENSURE_REF.
+ */
+#define POSIX_GUARD_PTR(result) __S2N_ENSURE((result) != NULL, return S2N_FAILURE)
+
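Alongside the RESULT_* and POSIX_* families above, the header also defines a PTR_* family (below) for pointer-returning functions, which set s2n_errno and return NULL on failure. A minimal sketch of that convention (the lookup helper is hypothetical):

    #include <stddef.h>

    #include "utils/s2n_safety.h"

    /* Hypothetical lookup: NULL plus s2n_errno on bad input, a valid pointer otherwise. */
    static const char *s2n_example_name_for_id(const char *const *table, size_t table_len, size_t id)
    {
        PTR_ENSURE_REF(table);
        PTR_ENSURE_LT(id, table_len);
        return table[id];
    }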
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Sets the global `s2n_errno` to `error` and returns with a `NULL`
+ */
+#define PTR_BAIL(error) do { _S2N_ERROR((error)); return NULL; } while (0)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures the `condition` is `true`, otherwise the function will `PTR_BAIL` with `error`
+ */
+#define PTR_ENSURE(condition, error) __S2N_ENSURE((condition), PTR_BAIL(error))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures the `condition` is `true`, otherwise the function will `PTR_BAIL` with `error`
+ *
+ * NOTE: The condition will _only_ be checked when the code is compiled in debug mode.
+ * In release mode, the check is removed.
+ */
+#define PTR_DEBUG_ENSURE(condition, error) __S2N_ENSURE_DEBUG((condition), PTR_BAIL(error))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `(result) != NULL`, otherwise the function will `PTR_BAIL` with `error`
+ *
+ * This can be useful for overriding the global `s2n_errno`
+ */
+#define PTR_ENSURE_OK(result, error) __S2N_ENSURE((result) != NULL, PTR_BAIL(error))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is greater than or equal to `b`, otherwise the function will `PTR_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define PTR_ENSURE_GTE(a, b) __S2N_ENSURE((a) >= (b), PTR_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is less than or equal to `b`, otherwise the function will `PTR_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define PTR_ENSURE_LTE(a, b) __S2N_ENSURE((a) <= (b), PTR_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is greater than `b`, otherwise the function will `PTR_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define PTR_ENSURE_GT(a, b) __S2N_ENSURE((a) > (b), PTR_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is less than `b`, otherwise the function will `PTR_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define PTR_ENSURE_LT(a, b) __S2N_ENSURE((a) < (b), PTR_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is equal to `b`, otherwise the function will `PTR_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define PTR_ENSURE_EQ(a, b) __S2N_ENSURE((a) == (b), PTR_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `a` is not equal to `b`, otherwise the function will `PTR_BAIL` with a `S2N_ERR_SAFETY` error
+ */
+#define PTR_ENSURE_NE(a, b) __S2N_ENSURE((a) != (b), PTR_BAIL(S2N_ERR_SAFETY))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `min <= n <= max`, otherwise the function will `PTR_BAIL` with `S2N_ERR_SAFETY`
+ */
+#define PTR_ENSURE_INCLUSIVE_RANGE(min, n, max) \
+ do { \
+ __typeof(n) __tmp_n = ( n ); \
+ __typeof(n) __tmp_min = ( min ); \
+ __typeof(n) __tmp_max = ( max ); \
+ PTR_ENSURE_GTE(__tmp_n, __tmp_min); \
+ PTR_ENSURE_LTE(__tmp_n, __tmp_max); \
+ } while(0)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `min < n < max`, otherwise the function will `PTR_BAIL` with `S2N_ERR_SAFETY`
+ */
+#define PTR_ENSURE_EXCLUSIVE_RANGE(min, n, max) \
+ do { \
+ __typeof(n) __tmp_n = ( n ); \
+ __typeof(n) __tmp_min = ( min ); \
+ __typeof(n) __tmp_max = ( max ); \
+ PTR_ENSURE_GT(__tmp_n, __tmp_min); \
+ PTR_ENSURE_LT(__tmp_n, __tmp_max); \
+ } while(0)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `x` is a readable reference, otherwise the function will `PTR_BAIL` with `S2N_ERR_NULL`
+ */
+#define PTR_ENSURE_REF(x) __S2N_ENSURE(S2N_OBJECT_PTR_IS_READABLE(x), PTR_BAIL(S2N_ERR_NULL))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `x` is a mutable reference, otherwise the function will `PTR_BAIL` with `S2N_ERR_NULL`
+ */
+#define PTR_ENSURE_MUT(x) __S2N_ENSURE(S2N_OBJECT_PTR_IS_WRITABLE(x), PTR_BAIL(S2N_ERR_NULL))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
+ *
+ * `PTR_PRECONDITION` should be used at the beginning of a function to make assertions about
+ * the provided arguments. By default, it is functionally equivalent to `PTR_GUARD_RESULT(result)`
+ * but can be altered by a testing environment to provide additional guarantees.
+ */
+#define PTR_PRECONDITION(result) PTR_GUARD_RESULT(__S2N_ENSURE_PRECONDITION((result)))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures the `result` is `S2N_RESULT_OK`, otherwise the function will return an error signal
+ *
+ * NOTE: The condition will _only_ be checked when the code is compiled in debug mode.
+ * In release mode, the check is removed.
+ *
+ * `PTR_POSTCONDITION` should be used at the end of a function to make assertions about
+ * the resulting state. In debug mode, it is functionally equivalent to `PTR_GUARD_RESULT(result)`.
+ * In production builds, it becomes a no-op. This can also be altered by a testing environment
+ * to provide additional guarantees.
+ */
+#define PTR_POSTCONDITION(result) PTR_GUARD_RESULT(__S2N_ENSURE_POSTCONDITION((result)))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Performs a safer memcpy.
+ *
+ * The following checks are performed:
+ *
+ * * `destination` is non-null
+ * * `source` is non-null
+ *
+ * Callers will still need to ensure the following:
+ *
+ * * The size of the data pointed to by both the `destination` and `source` parameters
+ *   shall be at least `len` bytes.
+ */
+#define PTR_CHECKED_MEMCPY(destination, source, len) __S2N_ENSURE_SAFE_MEMCPY((destination), (source), (len), PTR_GUARD)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Performs a safer memset.
+ *
+ * The following checks are performed:
+ *
+ * * `destination` is non-null
+ *
+ * Callers will still need to ensure the following:
+ *
+ * * The size of the data pointed to by the `destination` parameter shall be at least
+ * `len` bytes.
+ */
+#define PTR_CHECKED_MEMSET(destination, value, len) __S2N_ENSURE_SAFE_MEMSET((destination), (value), (len), PTR_ENSURE_REF)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `(result) != NULL`, otherwise the function will return `NULL`
+ */
+#define PTR_GUARD(result) __S2N_ENSURE((result) != NULL, return NULL)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `result == _OSSL_SUCCESS`, otherwise the function will `PTR_BAIL` with `error`
+ */
+#define PTR_GUARD_OSSL(result, error) __S2N_ENSURE((result) == _OSSL_SUCCESS, PTR_BAIL(error))
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `s2n_result_is_ok(result)`, otherwise the function will return `NULL`
+ */
+#define PTR_GUARD_RESULT(result) __S2N_ENSURE(s2n_result_is_ok(result), return NULL)
+
+/**
+ * DEPRECATED: all methods (except those in s2n.h) should return s2n_result.
+ *
+ * Ensures `(result) >= S2N_SUCCESS`, otherwise the function will return `NULL`
+ */
+#define PTR_GUARD_POSIX(result) __S2N_ENSURE((result) >= S2N_SUCCESS, return NULL)
+
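+/**
+ * Illustrative sketch, not part of the upstream header: a hypothetical
+ * pointer-returning constructor combining the PTR_* guards above, modelled
+ * on s2n_set_new(). `struct s2n_example` and `s2n_example_new` are invented
+ * names; s2n_alloc() and struct s2n_blob come from the real library.
+ *
+ *     struct s2n_example { uint32_t size; };
+ *
+ *     static struct s2n_example *s2n_example_new(uint32_t size)
+ *     {
+ *         PTR_ENSURE_GT(size, 0);
+ *         struct s2n_blob mem = { 0 };
+ *         PTR_GUARD_POSIX(s2n_alloc(&mem, sizeof(struct s2n_example)));
+ *         struct s2n_example *example = (void *) mem.data;
+ *         example->size = size;
+ *         return example;
+ *     }
+ */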
diff --git a/contrib/restricted/aws/s2n/utils/s2n_set.c b/contrib/restricted/aws/s2n/utils/s2n_set.c
index 4073a4cc5d..65c854a73c 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_set.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_set.c
@@ -23,8 +23,8 @@
S2N_RESULT s2n_set_validate(const struct s2n_set *set)
{
- ENSURE_REF(set);
- GUARD_RESULT(s2n_array_validate(set->data));
+ RESULT_ENSURE_REF(set);
+ RESULT_GUARD(s2n_array_validate(set->data));
return S2N_RESULT_OK;
}
@@ -32,14 +32,14 @@ S2N_RESULT s2n_set_validate(const struct s2n_set *set)
* Returns an error if the element already exists */
static S2N_RESULT s2n_set_binary_search(struct s2n_set *set, void *element, uint32_t* out)
{
- GUARD_RESULT(s2n_set_validate(set));
- ENSURE(S2N_MEM_IS_READABLE(element, set->data->element_size), S2N_ERR_NULL);
- ENSURE_REF(out);
+ RESULT_GUARD(s2n_set_validate(set));
+ RESULT_ENSURE(S2N_MEM_IS_READABLE(element, set->data->element_size), S2N_ERR_NULL);
+ RESULT_ENSURE_REF(out);
struct s2n_array *array = set->data;
int (*comparator)(const void*, const void*) = set->comparator;
uint32_t len = 0;
- GUARD_RESULT(s2n_array_num_elements(array, &len));
+ RESULT_GUARD(s2n_array_num_elements(array, &len));
if (len == 0) {
*out = 0;
@@ -53,12 +53,12 @@ static S2N_RESULT s2n_set_binary_search(struct s2n_set *set, void *element, uint
while (low <= top) {
int64_t mid = low + ((top - low) / 2);
void* array_element = NULL;
- GUARD_RESULT(s2n_array_get(array, mid, &array_element));
+ RESULT_GUARD(s2n_array_get(array, mid, &array_element));
int m = comparator(array_element, element);
/* the element is already in the set */
if (m == 0) {
- BAIL(S2N_ERR_SET_DUPLICATE_VALUE);
+ RESULT_BAIL(S2N_ERR_SET_DUPLICATE_VALUE);
}
if (m > 0) {
@@ -74,13 +74,13 @@ static S2N_RESULT s2n_set_binary_search(struct s2n_set *set, void *element, uint
struct s2n_set *s2n_set_new(uint32_t element_size, int (*comparator)(const void*, const void*))
{
- notnull_check_ptr(comparator);
+ PTR_ENSURE_REF(comparator);
struct s2n_blob mem = {0};
- GUARD_POSIX_PTR(s2n_alloc(&mem, sizeof(struct s2n_set)));
+ PTR_GUARD_POSIX(s2n_alloc(&mem, sizeof(struct s2n_set)));
struct s2n_set *set = (void *) mem.data;
*set = (struct s2n_set) {.data = s2n_array_new(element_size), .comparator = comparator};
if(set->data == NULL) {
- GUARD_POSIX_PTR(s2n_free(&mem));
+ PTR_GUARD_POSIX(s2n_free(&mem));
return NULL;
}
return set;
@@ -88,43 +88,43 @@ struct s2n_set *s2n_set_new(uint32_t element_size, int (*comparator)(const void*
S2N_RESULT s2n_set_add(struct s2n_set *set, void *element)
{
- GUARD_RESULT(s2n_set_validate(set));
+ RESULT_GUARD(s2n_set_validate(set));
- uint32_t index = 0;
- GUARD_RESULT(s2n_set_binary_search(set, element, &index));
- GUARD_RESULT(s2n_array_insert_and_copy(set->data, index, element));
+ uint32_t idx = 0;
+ RESULT_GUARD(s2n_set_binary_search(set, element, &idx));
+ RESULT_GUARD(s2n_array_insert_and_copy(set->data, idx, element));
return S2N_RESULT_OK;
}
-S2N_RESULT s2n_set_get(struct s2n_set *set, uint32_t index, void **element)
+S2N_RESULT s2n_set_get(struct s2n_set *set, uint32_t idx, void **element)
{
- GUARD_RESULT(s2n_set_validate(set));
- ENSURE_REF(element);
+ RESULT_GUARD(s2n_set_validate(set));
+ RESULT_ENSURE_REF(element);
- GUARD_RESULT(s2n_array_get(set->data, index, element));
+ RESULT_GUARD(s2n_array_get(set->data, idx, element));
return S2N_RESULT_OK;
}
-S2N_RESULT s2n_set_remove(struct s2n_set *set, uint32_t index)
+S2N_RESULT s2n_set_remove(struct s2n_set *set, uint32_t idx)
{
- GUARD_RESULT(s2n_set_validate(set));
- GUARD_RESULT(s2n_array_remove(set->data, index));
+ RESULT_GUARD(s2n_set_validate(set));
+ RESULT_GUARD(s2n_array_remove(set->data, idx));
return S2N_RESULT_OK;
}
S2N_RESULT s2n_set_free_p(struct s2n_set **pset)
{
- ENSURE_REF(pset);
+ RESULT_ENSURE_REF(pset);
struct s2n_set *set = *pset;
- ENSURE_REF(set);
- GUARD_RESULT(s2n_array_free(set->data));
+ RESULT_ENSURE_REF(set);
+ RESULT_GUARD(s2n_array_free(set->data));
/* And finally the set object. */
- GUARD_AS_RESULT(s2n_free_object((uint8_t **)pset, sizeof(struct s2n_set)));
+ RESULT_GUARD_POSIX(s2n_free_object((uint8_t **)pset, sizeof(struct s2n_set)));
return S2N_RESULT_OK;
@@ -132,16 +132,16 @@ S2N_RESULT s2n_set_free_p(struct s2n_set **pset)
S2N_RESULT s2n_set_free(struct s2n_set *set)
{
- ENSURE_REF(set);
+ RESULT_ENSURE_REF(set);
return s2n_set_free_p(&set);
}
S2N_RESULT s2n_set_len(struct s2n_set *set, uint32_t *len)
{
- GUARD_RESULT(s2n_set_validate(set));
+ RESULT_GUARD(s2n_set_validate(set));
- GUARD_RESULT(s2n_array_num_elements(set->data, len));
+ RESULT_GUARD(s2n_array_num_elements(set->data, len));
return S2N_RESULT_OK;
}
diff --git a/contrib/restricted/aws/s2n/utils/s2n_set.h b/contrib/restricted/aws/s2n/utils/s2n_set.h
index b3dba15453..917123a076 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_set.h
+++ b/contrib/restricted/aws/s2n/utils/s2n_set.h
@@ -14,7 +14,7 @@
*/
#pragma once
-#include <s2n.h>
+#include "api/s2n.h"
#include "utils/s2n_result.h"
#include "utils/s2n_array.h"
@@ -26,8 +26,8 @@ struct s2n_set {
extern S2N_RESULT s2n_set_validate(const struct s2n_set *set);
extern struct s2n_set *s2n_set_new(uint32_t element_size, int (*comparator)(const void*, const void*));
extern S2N_RESULT s2n_set_add(struct s2n_set *set, void *element);
-extern S2N_RESULT s2n_set_get(struct s2n_set *set, uint32_t index, void **element);
-extern S2N_RESULT s2n_set_remove(struct s2n_set *set, uint32_t index);
+extern S2N_RESULT s2n_set_get(struct s2n_set *set, uint32_t idx, void **element);
+extern S2N_RESULT s2n_set_remove(struct s2n_set *set, uint32_t idx);
extern S2N_RESULT s2n_set_free_p(struct s2n_set **pset);
extern S2N_RESULT s2n_set_free(struct s2n_set *set);
extern S2N_RESULT s2n_set_len(struct s2n_set *set, uint32_t *len);
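+
+/* Illustrative usage sketch, not part of the upstream header: building and
+ * tearing down a sorted set from a caller that itself returns s2n_result.
+ * `s2n_example_compare` is a hypothetical comparator over uint32_t keys.
+ *
+ *     struct s2n_set *set = s2n_set_new(sizeof(uint32_t), s2n_example_compare);
+ *     RESULT_ENSURE_REF(set);
+ *
+ *     uint32_t value = 42;
+ *     RESULT_GUARD(s2n_set_add(set, &value));
+ *
+ *     uint32_t len = 0;
+ *     RESULT_GUARD(s2n_set_len(set, &len));
+ *
+ *     void *element = NULL;
+ *     RESULT_GUARD(s2n_set_get(set, 0, &element));
+ *
+ *     RESULT_GUARD(s2n_set_free(set));
+ */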
diff --git a/contrib/restricted/aws/s2n/utils/s2n_socket.c b/contrib/restricted/aws/s2n/utils/s2n_socket.c
index daefd1168a..4c809f4cf5 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_socket.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_socket.c
@@ -13,10 +13,10 @@
* permissions and limitations under the License.
*/
-#include <tls/s2n_connection.h>
+#include "tls/s2n_connection.h"
-#include <utils/s2n_socket.h>
-#include <utils/s2n_safety.h>
+#include "utils/s2n_socket.h"
+#include "utils/s2n_safety.h"
#include <netinet/tcp.h>
#include <netinet/in.h>
@@ -40,11 +40,13 @@
int s2n_socket_quickack(struct s2n_connection *conn)
{
#ifdef TCP_QUICKACK
- if (!conn->managed_io) {
+ POSIX_ENSURE_REF(conn);
+ if (!conn->managed_recv_io) {
return 0;
}
struct s2n_socket_read_io_context *r_io_ctx = (struct s2n_socket_read_io_context *) conn->recv_io_context;
+ POSIX_ENSURE_REF(r_io_ctx);
if (r_io_ctx->tcp_quickack_set) {
return 0;
}
@@ -63,29 +65,28 @@ int s2n_socket_write_snapshot(struct s2n_connection *conn)
{
#ifdef S2N_CORK
socklen_t corklen = sizeof(int);
-
+ POSIX_ENSURE_REF(conn);
struct s2n_socket_write_io_context *w_io_ctx = (struct s2n_socket_write_io_context *) conn->send_io_context;
- notnull_check(w_io_ctx);
+ POSIX_ENSURE_REF(w_io_ctx);
getsockopt(w_io_ctx->fd, IPPROTO_TCP, S2N_CORK, &w_io_ctx->original_cork_val, &corklen);
- eq_check(corklen, sizeof(int));
+ POSIX_ENSURE_EQ(corklen, sizeof(int));
w_io_ctx->original_cork_is_set = 1;
#endif
return 0;
}
-
int s2n_socket_read_snapshot(struct s2n_connection *conn)
{
#ifdef SO_RCVLOWAT
socklen_t watlen = sizeof(int);
-
+ POSIX_ENSURE_REF(conn);
struct s2n_socket_read_io_context *r_io_ctx = (struct s2n_socket_read_io_context *) conn->recv_io_context;
- notnull_check(r_io_ctx);
+ POSIX_ENSURE_REF(r_io_ctx);
getsockopt(r_io_ctx->fd, SOL_SOCKET, SO_RCVLOWAT, &r_io_ctx->original_rcvlowat_val, &watlen);
- eq_check(watlen, sizeof(int));
+ POSIX_ENSURE_EQ(watlen, sizeof(int));
r_io_ctx->original_rcvlowat_is_set = 1;
#endif
@@ -95,8 +96,9 @@ int s2n_socket_read_snapshot(struct s2n_connection *conn)
int s2n_socket_write_restore(struct s2n_connection *conn)
{
#ifdef S2N_CORK
+ POSIX_ENSURE_REF(conn);
struct s2n_socket_write_io_context *w_io_ctx = (struct s2n_socket_write_io_context *) conn->send_io_context;
- notnull_check(w_io_ctx);
+ POSIX_ENSURE_REF(w_io_ctx);
if (!w_io_ctx->original_cork_is_set) {
return 0;
@@ -111,8 +113,9 @@ int s2n_socket_write_restore(struct s2n_connection *conn)
int s2n_socket_read_restore(struct s2n_connection *conn)
{
#ifdef SO_RCVLOWAT
+ POSIX_ENSURE_REF(conn);
struct s2n_socket_read_io_context *r_io_ctx = (struct s2n_socket_read_io_context *) conn->recv_io_context;
- notnull_check(r_io_ctx);
+ POSIX_ENSURE_REF(r_io_ctx);
if (!r_io_ctx->original_rcvlowat_is_set) {
return 0;
@@ -126,13 +129,14 @@ int s2n_socket_read_restore(struct s2n_connection *conn)
int s2n_socket_was_corked(struct s2n_connection *conn)
{
+ POSIX_ENSURE_REF(conn);
/* If we're not using custom I/O and a send fd has not been set yet, return false*/
- if (!conn->managed_io || !conn->send) {
+ if (!conn->managed_send_io || !conn->send) {
return 0;
}
struct s2n_socket_write_io_context *io_ctx = (struct s2n_socket_write_io_context *) conn->send_io_context;
- notnull_check(io_ctx);
+ POSIX_ENSURE_REF(io_ctx);
return io_ctx->original_cork_val;
}
@@ -140,10 +144,11 @@ int s2n_socket_was_corked(struct s2n_connection *conn)
int s2n_socket_write_cork(struct s2n_connection *conn)
{
#ifdef S2N_CORK
+ POSIX_ENSURE_REF(conn);
int optval = S2N_CORK_ON;
struct s2n_socket_write_io_context *w_io_ctx = (struct s2n_socket_write_io_context *) conn->send_io_context;
- notnull_check(w_io_ctx);
+ POSIX_ENSURE_REF(w_io_ctx);
/* Ignore the return value, if it fails it fails */
setsockopt(w_io_ctx->fd, IPPROTO_TCP, S2N_CORK, &optval, sizeof(optval));
@@ -155,10 +160,11 @@ int s2n_socket_write_cork(struct s2n_connection *conn)
int s2n_socket_write_uncork(struct s2n_connection *conn)
{
#ifdef S2N_CORK
+ POSIX_ENSURE_REF(conn);
int optval = S2N_CORK_OFF;
struct s2n_socket_write_io_context *w_io_ctx = (struct s2n_socket_write_io_context *) conn->send_io_context;
- notnull_check(w_io_ctx);
+ POSIX_ENSURE_REF(w_io_ctx);
/* Ignore the return value, if it fails it fails */
setsockopt(w_io_ctx->fd, IPPROTO_TCP, S2N_CORK, &optval, sizeof(optval));
@@ -170,8 +176,9 @@ int s2n_socket_write_uncork(struct s2n_connection *conn)
int s2n_socket_set_read_size(struct s2n_connection *conn, int size)
{
#ifdef SO_RCVLOWAT
+ POSIX_ENSURE_REF(conn);
struct s2n_socket_read_io_context *r_io_ctx = (struct s2n_socket_read_io_context *) conn->recv_io_context;
- notnull_check(r_io_ctx);
+ POSIX_ENSURE_REF(r_io_ctx);
setsockopt(r_io_ctx->fd, SOL_SOCKET, SO_RCVLOWAT, &size, sizeof(size));
#endif
@@ -181,10 +188,12 @@ int s2n_socket_set_read_size(struct s2n_connection *conn, int size)
int s2n_socket_read(void *io_context, uint8_t *buf, uint32_t len)
{
+ POSIX_ENSURE_REF(io_context);
+ POSIX_ENSURE_REF(buf);
int rfd = ((struct s2n_socket_read_io_context*) io_context)->fd;
if (rfd < 0) {
errno = EBADF;
- S2N_ERROR(S2N_ERR_BAD_FD);
+ POSIX_BAIL(S2N_ERR_BAD_FD);
}
/* Clear the quickack flag so we know to reset it */
@@ -192,30 +201,36 @@ int s2n_socket_read(void *io_context, uint8_t *buf, uint32_t len)
/* On success, the number of bytes read is returned. On failure, -1 is
* returned and errno is set appropriately. */
- return read(rfd, buf, len);
+ ssize_t result = read(rfd, buf, len);
+ POSIX_ENSURE_INCLUSIVE_RANGE(INT_MIN, result, INT_MAX);
+ return result;
}
int s2n_socket_write(void *io_context, const uint8_t *buf, uint32_t len)
{
+ POSIX_ENSURE_REF(io_context);
+ POSIX_ENSURE_REF(buf);
int wfd = ((struct s2n_socket_write_io_context*) io_context)->fd;
if (wfd < 0) {
errno = EBADF;
- S2N_ERROR(S2N_ERR_BAD_FD);
+ POSIX_BAIL(S2N_ERR_BAD_FD);
}
/* On success, the number of bytes written is returned. On failure, -1 is
* returned and errno is set appropriately. */
- return write(wfd, buf, len);
+ ssize_t result = write(wfd, buf, len);
+ POSIX_ENSURE_INCLUSIVE_RANGE(INT_MIN, result, INT_MAX);
+ return result;
}
int s2n_socket_is_ipv6(int fd, uint8_t *ipv6)
{
- notnull_check(ipv6);
+ POSIX_ENSURE_REF(ipv6);
socklen_t len;
struct sockaddr_storage addr;
len = sizeof (addr);
- GUARD(getpeername(fd, (struct sockaddr*)&addr, &len));
+ POSIX_GUARD(getpeername(fd, (struct sockaddr*)&addr, &len));
*ipv6 = 0;
if (AF_INET6 == addr.ss_family) {
diff --git a/contrib/restricted/aws/s2n/utils/s2n_str.c b/contrib/restricted/aws/s2n/utils/s2n_str.c
deleted file mode 100644
index a1be56b427..0000000000
--- a/contrib/restricted/aws/s2n/utils/s2n_str.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://aws.amazon.com/apache2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-#include <string.h>
-#include <sys/param.h>
-#include "utils/s2n_str.h"
-
-char *s2n_strcpy(char *buf, char *last, const char *str) {
-
-/* CBMC pointer checks need to be disabled to compare buf and last for
- * the case where they are the same. */
-#pragma CPROVER check push
-#pragma CPROVER check disable "pointer"
-
- if (buf >= last) {
- return buf;
- }
-
-#pragma CPROVER check pop
-
- if (NULL == str) {
- *buf = '\0';
- return buf;
- }
-
- /* Free bytes needs to be one byte smaller than size of a storage,
- * as strncpy always writes '\0', but doesn't include it in n
- */
- size_t bytes_to_copy = MIN(last - buf - 1, strlen(str));
-
- char *p = buf;
- if (bytes_to_copy > 0) {
- p = (char *)memcpy(buf, str, bytes_to_copy) + bytes_to_copy;
- }
- *p = '\0';
-
- return p;
-}
diff --git a/contrib/restricted/aws/s2n/utils/s2n_timer.c b/contrib/restricted/aws/s2n/utils/s2n_timer.c
index e1766ce1e3..ae93f6a7bb 100644
--- a/contrib/restricted/aws/s2n/utils/s2n_timer.c
+++ b/contrib/restricted/aws/s2n/utils/s2n_timer.c
@@ -21,7 +21,7 @@
S2N_RESULT s2n_timer_start(struct s2n_config *config, struct s2n_timer *timer)
{
- GUARD_AS_RESULT(config->monotonic_clock(config->monotonic_clock_ctx, &timer->time));
+ RESULT_GUARD_POSIX(config->monotonic_clock(config->monotonic_clock_ctx, &timer->time));
return S2N_RESULT_OK;
}
@@ -30,7 +30,7 @@ S2N_RESULT s2n_timer_elapsed(struct s2n_config *config, struct s2n_timer *timer,
{
uint64_t current_time;
- GUARD_AS_RESULT(config->monotonic_clock(config->monotonic_clock_ctx, &current_time));
+ RESULT_GUARD_POSIX(config->monotonic_clock(config->monotonic_clock_ctx, &current_time));
*nanoseconds = current_time - timer->time;
@@ -41,7 +41,7 @@ S2N_RESULT s2n_timer_reset(struct s2n_config *config, struct s2n_timer *timer, u
{
uint64_t previous_time = timer->time;
- GUARD_RESULT(s2n_timer_start(config, timer));
+ RESULT_GUARD(s2n_timer_start(config, timer));
*nanoseconds = timer->time - previous_time;