author    orivej <orivej@yandex-team.ru>  2022-02-10 16:44:49 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:49 +0300
commit    718c552901d703c502ccbefdfc3c9028d608b947 (patch)
tree      46534a98bbefcd7b1f3faa5b52c138ab27db75b7 /contrib/restricted/aws/s2n/pq-crypto/bike_r2
parent    e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (diff)
download  ydb-718c552901d703c502ccbefdfc3c9028d608b947.tar.gz
Restoring authorship annotation for <orivej@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/aws/s2n/pq-crypto/bike_r2')
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c              210
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.h               98
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_wrap.h                 142
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_defs.h                214
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c              748
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/cleanup.h                  262
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c                   730
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.h                    56
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/defs.h                     288
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.c                     22
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.h                     72
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/functions_renaming.h       120
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x.h                     110
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_internal.h             64
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_mul.c                 194
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_portable.c            216
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c            374
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.h             66
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c                 236
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h                 156
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling_portable.c         96
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/secure_decode_portable.c   132
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/sha.h                       82
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/types.h                    278
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.c                320
-rw-r--r--  contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.h                316
26 files changed, 2801 insertions, 2801 deletions
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c
index 26c99bc80d..90b2f10824 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c
@@ -1,105 +1,105 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "aes_ctr_prf.h"
-#include "utilities.h"
-#include <string.h>
-
-ret_t
-init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
- IN const uint32_t max_invokations,
- IN const seed_t *seed)
-{
- if(0 == max_invokations)
- {
- BIKE_ERROR(E_AES_CTR_PRF_INIT_FAIL);
- }
-
- // Set the key schedule (from seed).
- // Make sure the size matches the AES256 key size
- DEFER_CLEANUP(aes256_key_t key, aes256_key_cleanup);
-
- bike_static_assert(sizeof(*seed) == sizeof(key.raw), seed_size_equals_ky_size);
- memcpy(key.raw, seed->raw, sizeof(key.raw));
-
- GUARD(aes256_key_expansion(&s->ks_ptr, &key));
-
- // Initialize buffer and counter
- s->ctr.u.qw[0] = 0;
- s->ctr.u.qw[1] = 0;
- s->buffer.u.qw[0] = 0;
- s->buffer.u.qw[1] = 0;
-
- s->pos = AES256_BLOCK_SIZE;
- s->rem_invokations = max_invokations;
-
- SEDMSG(" Init aes_prf_ctr state:\n");
- SEDMSG(" s.pos = %d\n", s->pos);
- SEDMSG(" s.rem_invokations = %u\n", s->rem_invokations);
- SEDMSG(" s.ctr = 0x\n");
-
- return SUCCESS;
-}
-
-_INLINE_ ret_t
-perform_aes(OUT uint8_t *ct, IN OUT aes_ctr_prf_state_t *s)
-{
- // Ensure that the CTR is big enough
- bike_static_assert(
- ((sizeof(s->ctr.u.qw[0]) == 8) && (BIT(33) >= MAX_AES_INVOKATION)),
- ctr_size_is_too_small);
-
- if(0 == s->rem_invokations)
- {
- BIKE_ERROR(E_AES_OVER_USED);
- }
-
- GUARD(aes256_enc(ct, s->ctr.u.bytes, &s->ks_ptr));
-
- s->ctr.u.qw[0]++;
- s->rem_invokations--;
-
- return SUCCESS;
-}
-
-ret_t
-aes_ctr_prf(OUT uint8_t *a, IN OUT aes_ctr_prf_state_t *s, IN const uint32_t len)
-{
- // When len is smaller than what's left in the buffer,
- // no additional AES invocation is needed
- if((len + s->pos) <= AES256_BLOCK_SIZE)
- {
- memcpy(a, &s->buffer.u.bytes[s->pos], len);
- s->pos += len;
-
- return SUCCESS;
- }
-
- // If s.pos != AES256_BLOCK_SIZE then copy what's left in the buffer
- // Else copy zero bytes
- uint32_t idx = AES256_BLOCK_SIZE - s->pos;
- memcpy(a, &s->buffer.u.bytes[s->pos], idx);
-
- // Init s.pos
- s->pos = 0;
-
- // Copy full AES blocks
- while((len - idx) >= AES256_BLOCK_SIZE)
- {
- GUARD(perform_aes(&a[idx], s));
- idx += AES256_BLOCK_SIZE;
- }
-
- GUARD(perform_aes(s->buffer.u.bytes, s));
-
- // Copy the tail
- s->pos = len - idx;
- memcpy(&a[idx], s->buffer.u.bytes, s->pos);
-
- return SUCCESS;
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "aes_ctr_prf.h"
+#include "utilities.h"
+#include <string.h>
+
+ret_t
+init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
+ IN const uint32_t max_invokations,
+ IN const seed_t *seed)
+{
+ if(0 == max_invokations)
+ {
+ BIKE_ERROR(E_AES_CTR_PRF_INIT_FAIL);
+ }
+
+ // Set the key schedule (from seed).
+ // Make sure the size matches the AES256 key size
+ DEFER_CLEANUP(aes256_key_t key, aes256_key_cleanup);
+
+ bike_static_assert(sizeof(*seed) == sizeof(key.raw), seed_size_equals_ky_size);
+ memcpy(key.raw, seed->raw, sizeof(key.raw));
+
+ GUARD(aes256_key_expansion(&s->ks_ptr, &key));
+
+ // Initialize buffer and counter
+ s->ctr.u.qw[0] = 0;
+ s->ctr.u.qw[1] = 0;
+ s->buffer.u.qw[0] = 0;
+ s->buffer.u.qw[1] = 0;
+
+ s->pos = AES256_BLOCK_SIZE;
+ s->rem_invokations = max_invokations;
+
+ SEDMSG(" Init aes_prf_ctr state:\n");
+ SEDMSG(" s.pos = %d\n", s->pos);
+ SEDMSG(" s.rem_invokations = %u\n", s->rem_invokations);
+ SEDMSG(" s.ctr = 0x\n");
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t
+perform_aes(OUT uint8_t *ct, IN OUT aes_ctr_prf_state_t *s)
+{
+ // Ensure that the CTR is big enough
+ bike_static_assert(
+ ((sizeof(s->ctr.u.qw[0]) == 8) && (BIT(33) >= MAX_AES_INVOKATION)),
+ ctr_size_is_too_small);
+
+ if(0 == s->rem_invokations)
+ {
+ BIKE_ERROR(E_AES_OVER_USED);
+ }
+
+ GUARD(aes256_enc(ct, s->ctr.u.bytes, &s->ks_ptr));
+
+ s->ctr.u.qw[0]++;
+ s->rem_invokations--;
+
+ return SUCCESS;
+}
+
+ret_t
+aes_ctr_prf(OUT uint8_t *a, IN OUT aes_ctr_prf_state_t *s, IN const uint32_t len)
+{
+ // When len is smaller than what's left in the buffer,
+ // no additional AES invocation is needed
+ if((len + s->pos) <= AES256_BLOCK_SIZE)
+ {
+ memcpy(a, &s->buffer.u.bytes[s->pos], len);
+ s->pos += len;
+
+ return SUCCESS;
+ }
+
+ // If s.pos != AES256_BLOCK_SIZE then copy what's left in the buffer
+ // Else copy zero bytes
+ uint32_t idx = AES256_BLOCK_SIZE - s->pos;
+ memcpy(a, &s->buffer.u.bytes[s->pos], idx);
+
+ // Init s.pos
+ s->pos = 0;
+
+ // Copy full AES blocks
+ while((len - idx) >= AES256_BLOCK_SIZE)
+ {
+ GUARD(perform_aes(&a[idx], s));
+ idx += AES256_BLOCK_SIZE;
+ }
+
+ GUARD(perform_aes(s->buffer.u.bytes, s));
+
+ // Copy the tail
+ s->pos = len - idx;
+ memcpy(&a[idx], s->buffer.u.bytes, s->pos);
+
+ return SUCCESS;
+}
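A note on usage: the PRF above hands out bytes from a 16-byte buffer and only invokes AES when the buffer runs dry. A minimal caller sketch, using only names that appear in this diff (GUARD propagates BIKE_ERROR codes; DEFER_CLEANUP wipes the state when it leaves scope):

#include "aes_ctr_prf.h"

ret_t fill_random_bytes(OUT uint8_t *out, IN const uint32_t len,
                        IN const seed_t *seed)
{
    DEFER_CLEANUP(aes_ctr_prf_state_t prf = {0}, aes_ctr_prf_state_cleanup);

    // A state may serve at most max_invokations block encryptions in total.
    GUARD(init_aes_ctr_prf_state(&prf, MAX_AES_INVOKATION, seed));

    // Short requests come out of the internal buffer; longer ones trigger
    // whole-block encryptions of the counter plus a buffered tail copy.
    GUARD(aes_ctr_prf(out, &prf, len));

    return SUCCESS;
}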
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.h
index ac17d4ddd5..bfcdeebd4a 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.h
@@ -1,49 +1,49 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "aes_wrap.h"
-
-//////////////////////////////
-// Types
-/////////////////////////////
-
-typedef struct aes_ctr_prf_state_s
-{
- uint128_t ctr;
- uint128_t buffer;
- aes256_ks_t ks_ptr;
- uint32_t rem_invokations;
- uint8_t pos;
-} aes_ctr_prf_state_t;
-
-//////////////////////////////
-// Methods
-/////////////////////////////
-
-ret_t
-init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
- IN uint32_t max_invokations,
- IN const seed_t *seed);
-
-ret_t
-aes_ctr_prf(OUT uint8_t *a, IN OUT aes_ctr_prf_state_t *s, IN uint32_t len);
-
-_INLINE_ void
-finalize_aes_ctr_prf(IN OUT aes_ctr_prf_state_t *s)
-{
- aes256_free_ks(&s->ks_ptr);
- secure_clean((uint8_t *)s, sizeof(*s));
-}
-
-_INLINE_ void
-aes_ctr_prf_state_cleanup(IN OUT aes_ctr_prf_state_t *s)
-{
- finalize_aes_ctr_prf(s);
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "aes_wrap.h"
+
+//////////////////////////////
+// Types
+/////////////////////////////
+
+typedef struct aes_ctr_prf_state_s
+{
+ uint128_t ctr;
+ uint128_t buffer;
+ aes256_ks_t ks_ptr;
+ uint32_t rem_invokations;
+ uint8_t pos;
+} aes_ctr_prf_state_t;
+
+//////////////////////////////
+// Methods
+/////////////////////////////
+
+ret_t
+init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
+ IN uint32_t max_invokations,
+ IN const seed_t *seed);
+
+ret_t
+aes_ctr_prf(OUT uint8_t *a, IN OUT aes_ctr_prf_state_t *s, IN uint32_t len);
+
+_INLINE_ void
+finalize_aes_ctr_prf(IN OUT aes_ctr_prf_state_t *s)
+{
+ aes256_free_ks(&s->ks_ptr);
+ secure_clean((uint8_t *)s, sizeof(*s));
+}
+
+_INLINE_ void
+aes_ctr_prf_state_cleanup(IN OUT aes_ctr_prf_state_t *s)
+{
+ finalize_aes_ctr_prf(s);
+}
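The finalize_aes_ctr_prf/aes_ctr_prf_state_cleanup pair exists so that callers can register the state with DEFER_CLEANUP, which lives in s2n's utils/s2n_safety.h and is not part of this diff. A sketch of the pattern it is assumed to be built on (the GCC/Clang cleanup attribute):

/* Hypothetical reconstruction for illustration; see utils/s2n_safety.h
 * for the real macro. */
#define DEFER_CLEANUP(decl, cleanup_fn) __attribute__((cleanup(cleanup_fn))) decl

static void demo(const seed_t *seed)
{
    // aes_ctr_prf_state_cleanup(&s) runs on every path out of this scope,
    // freeing the key schedule and zeroizing the state.
    DEFER_CLEANUP(aes_ctr_prf_state_t s = {0}, aes_ctr_prf_state_cleanup);
    (void)init_aes_ctr_prf_state(&s, MAX_AES_INVOKATION, seed);
}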
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_wrap.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_wrap.h
index f0adc0bb52..1a377d1c15 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_wrap.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_wrap.h
@@ -1,71 +1,71 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker, Shay Gueron, and Dusan Kostic,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com, dkostic@amazon.com)
- */
-
-#pragma once
-
-#include "cleanup.h"
-#include <openssl/evp.h>
-
-#define MAX_AES_INVOKATION (MASK(32))
-
-#define AES256_KEY_SIZE (32U)
-#define AES256_KEY_BITS (AES256_KEY_SIZE * 8)
-#define AES256_BLOCK_SIZE (16U)
-#define AES256_ROUNDS (14U)
-
-typedef ALIGN(16) struct aes256_key_s
-{
- uint8_t raw[AES256_KEY_SIZE];
-} aes256_key_t;
-
-_INLINE_ void
-aes256_key_cleanup(aes256_key_t *o)
-{
- secure_clean(o->raw, sizeof(*o));
-}
-
-// Using OpenSSL structures
-typedef EVP_CIPHER_CTX *aes256_ks_t;
-
-_INLINE_ ret_t
-aes256_key_expansion(OUT aes256_ks_t *ks, IN const aes256_key_t *key)
-{
- *ks = EVP_CIPHER_CTX_new();
- if(*ks == NULL)
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
- if(0 == EVP_EncryptInit_ex(*ks, EVP_aes_256_ecb(), NULL, key->raw, NULL))
- {
- EVP_CIPHER_CTX_free(*ks);
- *ks = NULL;
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- EVP_CIPHER_CTX_set_padding(*ks, 0);
-
- return SUCCESS;
-}
-
-_INLINE_ ret_t
-aes256_enc(OUT uint8_t *ct, IN const uint8_t *pt, IN const aes256_ks_t *ks)
-{
- int outlen = 0;
- if(0 == EVP_EncryptUpdate(*ks, ct, &outlen, pt, AES256_BLOCK_SIZE))
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
- return SUCCESS;
-}
-
-_INLINE_ void
-aes256_free_ks(OUT aes256_ks_t *ks)
-{
- EVP_CIPHER_CTX_free(*ks);
- *ks = NULL;
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron, and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com, dkostic@amazon.com)
+ */
+
+#pragma once
+
+#include "cleanup.h"
+#include <openssl/evp.h>
+
+#define MAX_AES_INVOKATION (MASK(32))
+
+#define AES256_KEY_SIZE (32U)
+#define AES256_KEY_BITS (AES256_KEY_SIZE * 8)
+#define AES256_BLOCK_SIZE (16U)
+#define AES256_ROUNDS (14U)
+
+typedef ALIGN(16) struct aes256_key_s
+{
+ uint8_t raw[AES256_KEY_SIZE];
+} aes256_key_t;
+
+_INLINE_ void
+aes256_key_cleanup(aes256_key_t *o)
+{
+ secure_clean(o->raw, sizeof(*o));
+}
+
+// Using OpenSSL structures
+typedef EVP_CIPHER_CTX *aes256_ks_t;
+
+_INLINE_ ret_t
+aes256_key_expansion(OUT aes256_ks_t *ks, IN const aes256_key_t *key)
+{
+ *ks = EVP_CIPHER_CTX_new();
+ if(*ks == NULL)
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+ if(0 == EVP_EncryptInit_ex(*ks, EVP_aes_256_ecb(), NULL, key->raw, NULL))
+ {
+ EVP_CIPHER_CTX_free(*ks);
+ *ks = NULL;
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ EVP_CIPHER_CTX_set_padding(*ks, 0);
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t
+aes256_enc(OUT uint8_t *ct, IN const uint8_t *pt, IN const aes256_ks_t *ks)
+{
+ int outlen = 0;
+ if(0 == EVP_EncryptUpdate(*ks, ct, &outlen, pt, AES256_BLOCK_SIZE))
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+ return SUCCESS;
+}
+
+_INLINE_ void
+aes256_free_ks(OUT aes256_ks_t *ks)
+{
+ EVP_CIPHER_CTX_free(*ks);
+ *ks = NULL;
+}
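Note that the wrapper encrypts one 16-byte block at a time through EVP_aes_256_ecb; the CTR construction itself (incrementing the counter between calls) is done by the caller in aes_ctr_prf.c rather than by OpenSSL's CTR mode. The same one-block pattern as a standalone sketch against the plain EVP API, with no BIKE types assumed:

#include <openssl/evp.h>
#include <stdint.h>

// Encrypt exactly one AES-256 block. Returns 0 on success, -1 on failure.
int ecb_encrypt_block(uint8_t ct[16], const uint8_t pt[16],
                      const uint8_t key[32])
{
    int outlen = 0;
    int rc     = -1;

    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    if(ctx == NULL)
    {
        return -1;
    }

    if(1 == EVP_EncryptInit_ex(ctx, EVP_aes_256_ecb(), NULL, key, NULL))
    {
        // A single exact block: disable PKCS#7 padding.
        EVP_CIPHER_CTX_set_padding(ctx, 0);
        if((1 == EVP_EncryptUpdate(ctx, ct, &outlen, pt, 16)) && (outlen == 16))
        {
            rc = 0;
        }
    }

    EVP_CIPHER_CTX_free(ctx);
    return rc;
}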
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_defs.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_defs.h
index 34a221462b..b64014abf3 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_defs.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_defs.h
@@ -1,107 +1,107 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "defs.h"
-
-#define LEVEL 1
-
-////////////////////////////////////////////
-// BIKE Parameters
-///////////////////////////////////////////
-#define N0 2
-
-#ifndef LEVEL
-# define LEVEL 1
-#endif
-
-#if(LEVEL == 3)
-# ifdef INDCPA
-# define R_BITS 19853
-# else
-# define R_BITS 24821
-# endif
-# define DV 103
-# define T1 199
-
-# define THRESHOLD_COEFF0 15.932
-# define THRESHOLD_COEFF1 0.0052936
-
-// The gfm code is optimized to a block size in this case:
-# define BLOCK_SIZE 32768
-#elif(LEVEL == 1)
-// 64-bits of post-quantum security parameters (BIKE paper):
-# ifdef INDCPA
-# define R_BITS 10163
-# else
-# define R_BITS 11779
-# endif
-# define DV 71
-# define T1 134
-
-# define THRESHOLD_COEFF0 13.530
-# define THRESHOLD_COEFF1 0.0069721
-
-// The gfm code is optimized to a block size in this case:
-# define BLOCK_SIZE (16384)
-#else
-# error "Bad level, choose one of 1/3"
-#endif
-
-#ifdef INDCPA
-# define NUM_OF_SEEDS 2
-#else
-# define NUM_OF_SEEDS 3
-#endif
-
-// Round the size up to the nearest byte.
-// The SIZE suffix denotes a number of bytes (uint8_t).
-#define N_BITS (R_BITS * N0)
-#define R_SIZE DIVIDE_AND_CEIL(R_BITS, 8)
-#define R_QW DIVIDE_AND_CEIL(R_BITS, 8 * QW_SIZE)
-#define R_YMM DIVIDE_AND_CEIL(R_BITS, 8 * YMM_SIZE)
-#define R_ZMM DIVIDE_AND_CEIL(R_BITS, 8 * ZMM_SIZE)
-
-#define N_SIZE DIVIDE_AND_CEIL(N_BITS, 8)
-
-#define R_BLOCKS DIVIDE_AND_CEIL(R_BITS, BLOCK_SIZE)
-#define R_PADDED (R_BLOCKS * BLOCK_SIZE)
-#define R_PADDED_SIZE (R_PADDED / 8)
-#define R_PADDED_QW (R_PADDED / 64)
-
-#define N_BLOCKS DIVIDE_AND_CEIL(N_BITS, BLOCK_SIZE)
-#define N_PADDED (N_BLOCKS * BLOCK_SIZE)
-#define N_PADDED_SIZE (N_PADDED / 8)
-#define N_PADDED_QW (N_PADDED / 64)
-
-#define R_DDQWORDS_BITS (DIVIDE_AND_CEIL(R_BITS, ALL_YMM_SIZE) * ALL_YMM_SIZE)
-bike_static_assert((R_BITS % ALL_YMM_SIZE != 0), rbits_512_err);
-
-#define N_DDQWORDS_BITS (R_DDQWORDS_BITS + R_BITS)
-bike_static_assert((N_BITS % ALL_YMM_SIZE != 0), nbits_512_err);
-
-#define LAST_R_QW_LEAD (R_BITS & MASK(6))
-#define LAST_R_QW_TRAIL (64 - LAST_R_QW_LEAD)
-#define LAST_R_QW_MASK MASK(LAST_R_QW_LEAD)
-
-#define LAST_R_BYTE_LEAD (R_BITS & MASK(3))
-#define LAST_R_BYTE_TRAIL (8 - LAST_R_BYTE_LEAD)
-#define LAST_R_BYTE_MASK MASK(LAST_R_BYTE_LEAD)
-
-// BIKE auxiliary functions parameters:
-#define ELL_K_BITS 256
-#define ELL_K_SIZE (ELL_K_BITS / 8)
-
-////////////////////////////////
-// Parameters for the BG decoder.
-////////////////////////////////
-#define DELTA 3
-#define SLICES (LOG2_MSB(DV) + 1)
-
-#define BGF_DECODER
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "defs.h"
+
+#define LEVEL 1
+
+////////////////////////////////////////////
+// BIKE Parameters
+///////////////////////////////////////////
+#define N0 2
+
+#ifndef LEVEL
+# define LEVEL 1
+#endif
+
+#if(LEVEL == 3)
+# ifdef INDCPA
+# define R_BITS 19853
+# else
+# define R_BITS 24821
+# endif
+# define DV 103
+# define T1 199
+
+# define THRESHOLD_COEFF0 15.932
+# define THRESHOLD_COEFF1 0.0052936
+
+// The gfm code is optimized to a block size in this case:
+# define BLOCK_SIZE 32768
+#elif(LEVEL == 1)
+// 64-bits of post-quantum security parameters (BIKE paper):
+# ifdef INDCPA
+# define R_BITS 10163
+# else
+# define R_BITS 11779
+# endif
+# define DV 71
+# define T1 134
+
+# define THRESHOLD_COEFF0 13.530
+# define THRESHOLD_COEFF1 0.0069721
+
+// The gfm code is optimized to a block size in this case:
+# define BLOCK_SIZE (16384)
+#else
+# error "Bad level, choose one of 1/3"
+#endif
+
+#ifdef INDCPA
+# define NUM_OF_SEEDS 2
+#else
+# define NUM_OF_SEEDS 3
+#endif
+
+// Round the size up to the nearest byte.
+// The SIZE suffix denotes a number of bytes (uint8_t).
+#define N_BITS (R_BITS * N0)
+#define R_SIZE DIVIDE_AND_CEIL(R_BITS, 8)
+#define R_QW DIVIDE_AND_CEIL(R_BITS, 8 * QW_SIZE)
+#define R_YMM DIVIDE_AND_CEIL(R_BITS, 8 * YMM_SIZE)
+#define R_ZMM DIVIDE_AND_CEIL(R_BITS, 8 * ZMM_SIZE)
+
+#define N_SIZE DIVIDE_AND_CEIL(N_BITS, 8)
+
+#define R_BLOCKS DIVIDE_AND_CEIL(R_BITS, BLOCK_SIZE)
+#define R_PADDED (R_BLOCKS * BLOCK_SIZE)
+#define R_PADDED_SIZE (R_PADDED / 8)
+#define R_PADDED_QW (R_PADDED / 64)
+
+#define N_BLOCKS DIVIDE_AND_CEIL(N_BITS, BLOCK_SIZE)
+#define N_PADDED (N_BLOCKS * BLOCK_SIZE)
+#define N_PADDED_SIZE (N_PADDED / 8)
+#define N_PADDED_QW (N_PADDED / 64)
+
+#define R_DDQWORDS_BITS (DIVIDE_AND_CEIL(R_BITS, ALL_YMM_SIZE) * ALL_YMM_SIZE)
+bike_static_assert((R_BITS % ALL_YMM_SIZE != 0), rbits_512_err);
+
+#define N_DDQWORDS_BITS (R_DDQWORDS_BITS + R_BITS)
+bike_static_assert((N_BITS % ALL_YMM_SIZE != 0), nbits_512_err);
+
+#define LAST_R_QW_LEAD (R_BITS & MASK(6))
+#define LAST_R_QW_TRAIL (64 - LAST_R_QW_LEAD)
+#define LAST_R_QW_MASK MASK(LAST_R_QW_LEAD)
+
+#define LAST_R_BYTE_LEAD (R_BITS & MASK(3))
+#define LAST_R_BYTE_TRAIL (8 - LAST_R_BYTE_LEAD)
+#define LAST_R_BYTE_MASK MASK(LAST_R_BYTE_LEAD)
+
+// BIKE auxiliary functions parameters:
+#define ELL_K_BITS 256
+#define ELL_K_SIZE (ELL_K_BITS / 8)
+
+////////////////////////////////
+// Parameters for the BG decoder.
+////////////////////////////////
+#define DELTA 3
+#define SLICES (LOG2_MSB(DV) + 1)
+
+#define BGF_DECODER
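With LEVEL pinned to 1 and INDCPA unset, the macros above collapse to concrete numbers. A spot-check sketch, assuming DIVIDE_AND_CEIL(x, y) in defs.h (not shown in this diff) is the usual (x + y - 1) / y:

#include "bike_defs.h"

// LEVEL 1, non-INDCPA: R_BITS = 11779, N0 = 2, BLOCK_SIZE = 16384.
bike_static_assert(R_SIZE == 1473, r_size_check);        // ceil(11779 / 8)
bike_static_assert(N_SIZE == 2945, n_size_check);        // ceil(23558 / 8)
bike_static_assert(R_PADDED == 16384, r_padded_check);   // one block
bike_static_assert(N_PADDED == 32768, n_padded_check);   // two blocks
bike_static_assert(LAST_R_BYTE_LEAD == 3, last_r_check); // 11779 mod 8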
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c
index 3539827d14..730cb2f826 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/bike_r2_kem.c
@@ -1,374 +1,374 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker, Shay Gueron, and Dusan Kostic,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com, dkostic@amazon.com)
- */
-
-#include "decode.h"
-#include "gf2x.h"
-#include "sampling.h"
-#include "sha.h"
-#include "tls/s2n_kem.h"
-
-_INLINE_ void
-split_e(OUT split_e_t *splitted_e, IN const e_t *e)
-{
- // Copy lower bytes (e0)
- memcpy(splitted_e->val[0].raw, e->raw, R_SIZE);
-
- // Now load second value
- for(uint32_t i = R_SIZE; i < N_SIZE; ++i)
- {
- splitted_e->val[1].raw[i - R_SIZE] =
- ((e->raw[i] << LAST_R_BYTE_TRAIL) | (e->raw[i - 1] >> LAST_R_BYTE_LEAD));
- }
-
- // Fix corner case
- if(N_SIZE < (2ULL * R_SIZE))
- {
- splitted_e->val[1].raw[R_SIZE - 1] = (e->raw[N_SIZE - 1] >> LAST_R_BYTE_LEAD);
- }
-
- // Fix last value
- splitted_e->val[0].raw[R_SIZE - 1] &= LAST_R_BYTE_MASK;
- splitted_e->val[1].raw[R_SIZE - 1] &= LAST_R_BYTE_MASK;
-}
-
-_INLINE_ void
-translate_hash_to_ss(OUT ss_t *ss, IN sha_hash_t *hash)
-{
- bike_static_assert(sizeof(*hash) >= sizeof(*ss), hash_size_lt_ss_size);
- memcpy(ss->raw, hash->u.raw, sizeof(*ss));
-}
-
-_INLINE_ void
-translate_hash_to_seed(OUT seed_t *seed, IN sha_hash_t *hash)
-{
- bike_static_assert(sizeof(*hash) >= sizeof(*seed), hash_size_lt_seed_size);
- memcpy(seed->raw, hash->u.raw, sizeof(*seed));
-}
-
-_INLINE_ ret_t
-calc_pk(OUT pk_t *pk, IN const seed_t *g_seed, IN const pad_sk_t p_sk)
-{
- // PK is dbl padded because modmul requires some scratch space for the
- // multiplication result
- dbl_pad_pk_t p_pk = {0};
-
- // Initialize the padding to zero
- DEFER_CLEANUP(padded_r_t g = {0}, padded_r_cleanup);
-
- GUARD(sample_uniform_r_bits(&g.val, g_seed, MUST_BE_ODD));
-
- // Calculate (g0, g1) = (g*h1, g*h0)
- GUARD(gf2x_mod_mul((uint64_t *)&p_pk[0], (const uint64_t *)&g,
- (const uint64_t *)&p_sk[1]));
- GUARD(gf2x_mod_mul((uint64_t *)&p_pk[1], (const uint64_t *)&g,
- (const uint64_t *)&p_sk[0]));
-
- // Copy the data to the output parameters.
- pk->val[0] = p_pk[0].val;
- pk->val[1] = p_pk[1].val;
-
- print("g: ", (const uint64_t *)g.val.raw, R_BITS);
- print("g0: ", (const uint64_t *)&p_pk[0], R_BITS);
- print("g1: ", (uint64_t *)&p_pk[1], R_BITS);
-
- return SUCCESS;
-}
-
-// The function H is required by the BIKE-1 Round 2 variant. It uses the
-// extract-then-expand paradigm, based on SHA384 and AES256-CTR PRNG, to produce
-// e from (m*f0, m*f1):
-_INLINE_ ret_t
-function_h(OUT split_e_t *splitted_e, IN const r_t *in0, IN const r_t *in1)
-{
- DEFER_CLEANUP(generic_param_n_t tmp, generic_param_n_cleanup);
- DEFER_CLEANUP(sha_hash_t hash_seed = {0}, sha_hash_cleanup);
- DEFER_CLEANUP(seed_t seed_for_hash, seed_cleanup);
- DEFER_CLEANUP(aes_ctr_prf_state_t prf_state = {0}, finalize_aes_ctr_prf);
-
- tmp.val[0] = *in0;
- tmp.val[1] = *in1;
-
- // Hash (m*f0, m*f1) to generate a seed:
- sha(&hash_seed, sizeof(tmp), (uint8_t *)&tmp);
-
- // Format the seed as a 32-byte input:
- translate_hash_to_seed(&seed_for_hash, &hash_seed);
-
- // Use the seed to generate a sparse error vector e:
- DMSG(" Generating random error.\n");
- GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, &seed_for_hash));
-
- DEFER_CLEANUP(padded_e_t e, padded_e_cleanup);
- DEFER_CLEANUP(ALIGN(8) compressed_idx_t_t dummy, compressed_idx_t_cleanup);
-
- GUARD(generate_sparse_rep((uint64_t *)&e, dummy.val, T1, N_BITS, sizeof(e),
- &prf_state));
- split_e(splitted_e, &e.val);
-
- return SUCCESS;
-}
-
-_INLINE_ ret_t
-encrypt(OUT ct_t *ct, OUT split_e_t *mf, IN const pk_t *pk, IN const seed_t *seed)
-{
- DEFER_CLEANUP(padded_r_t m = {0}, padded_r_cleanup);
-
- DMSG(" Sampling m.\n");
- GUARD(sample_uniform_r_bits(&m.val, seed, NO_RESTRICTION));
-
- // Pad the public key
- pad_pk_t p_pk = {0};
- p_pk[0].val = pk->val[0];
- p_pk[1].val = pk->val[1];
-
- // Pad the ciphertext
- pad_ct_t p_ct = {0};
- p_ct[0].val = ct->val[0];
- p_ct[1].val = ct->val[1];
-
- DEFER_CLEANUP(dbl_pad_ct_t mf_int = {0}, dbl_pad_ct_cleanup);
-
- DMSG(" Computing m*f0 and m*f1.\n");
- GUARD(
- gf2x_mod_mul((uint64_t *)&mf_int[0], (uint64_t *)&m, (uint64_t *)&p_pk[0]));
- GUARD(
- gf2x_mod_mul((uint64_t *)&mf_int[1], (uint64_t *)&m, (uint64_t *)&p_pk[1]));
-
- DEFER_CLEANUP(split_e_t splitted_e, split_e_cleanup);
-
- DMSG(" Computing the hash function e <- H(m*f0, m*f1).\n");
- GUARD(function_h(&splitted_e, &mf_int[0].val, &mf_int[1].val));
-
- DMSG("  Adding the error to the ciphertext.\n");
- GUARD(gf2x_add(p_ct[0].val.raw, mf_int[0].val.raw, splitted_e.val[0].raw,
- R_SIZE));
- GUARD(gf2x_add(p_ct[1].val.raw, mf_int[1].val.raw, splitted_e.val[1].raw,
- R_SIZE));
-
- // Copy the data to the output parameters.
- ct->val[0] = p_ct[0].val;
- ct->val[1] = p_ct[1].val;
-
- // Copy the internal mf to the output parameters.
- mf->val[0] = mf_int[0].val;
- mf->val[1] = mf_int[1].val;
-
- print("e0: ", (uint64_t *)splitted_e.val[0].raw, R_BITS);
- print("e1: ", (uint64_t *)splitted_e.val[1].raw, R_BITS);
- print("c0: ", (uint64_t *)p_ct[0].val.raw, R_BITS);
- print("c1: ", (uint64_t *)p_ct[1].val.raw, R_BITS);
-
- return SUCCESS;
-}
-
-_INLINE_ ret_t
-reencrypt(OUT pad_ct_t ce,
- OUT split_e_t *e2,
- IN const split_e_t *e,
- IN const ct_t *l_ct)
-{
- // Compute (c0 + e0') and (c1 + e1')
- GUARD(gf2x_add(ce[0].val.raw, l_ct->val[0].raw, e->val[0].raw, R_SIZE));
- GUARD(gf2x_add(ce[1].val.raw, l_ct->val[1].raw, e->val[1].raw, R_SIZE));
-
- // (e0'', e1'') <-- H(c0 + e0', c1 + e1')
- GUARD(function_h(e2, &ce[0].val, &ce[1].val));
-
- return SUCCESS;
-}
-
-// Generate the Shared Secret K(mf0, mf1, c) by either
-// K(c0+e0', c1+e1', c) or K(sigma0, sigma1, c)
-_INLINE_ void
-get_ss(OUT ss_t *out, IN const r_t *in0, IN const r_t *in1, IN const ct_t *ct)
-{
- DMSG(" Enter get_ss.\n");
-
- uint8_t tmp[4 * R_SIZE];
- memcpy(tmp, in0->raw, R_SIZE);
- memcpy(tmp + R_SIZE, in1->raw, R_SIZE);
- memcpy(tmp + 2 * R_SIZE, ct, sizeof(*ct));
-
- // Calculate the hash digest
- DEFER_CLEANUP(sha_hash_t hash = {0}, sha_hash_cleanup);
- sha(&hash, sizeof(tmp), tmp);
-
- // Truncate the resulting digest, to produce the key K, by copying only the
- // desired number of LSBs.
- translate_hash_to_ss(out, &hash);
-
- secure_clean(tmp, sizeof(tmp));
- DMSG(" Exit get_ss.\n");
-}
-////////////////////////////////////////////////////////////////////////////////
-// The three APIs below (keypair, encapsulate, decapsulate) are defined by NIST:
-////////////////////////////////////////////////////////////////////////////////
-int
-BIKE1_L1_R2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
-{
- notnull_check(sk);
- notnull_check(pk);
-
- // Convert to this implementation's types
- pk_t *l_pk = (pk_t *)pk;
- DEFER_CLEANUP(ALIGN(8) sk_t l_sk = {0}, sk_cleanup);
-
- // For DRBG and AES_PRF
- DEFER_CLEANUP(seeds_t seeds = {0}, seeds_cleanup);
- DEFER_CLEANUP(aes_ctr_prf_state_t h_prf_state = {0}, aes_ctr_prf_state_cleanup);
-
- // For sigma0/1/2
- DEFER_CLEANUP(aes_ctr_prf_state_t s_prf_state = {0}, aes_ctr_prf_state_cleanup);
-
- // Padded for internal use only (the padded data is not released).
- DEFER_CLEANUP(pad_sk_t p_sk = {0}, pad_sk_cleanup);
-
- // Get the entropy seeds.
- GUARD(get_seeds(&seeds));
-
- DMSG(" Enter crypto_kem_keypair.\n");
- DMSG(" Calculating the secret key.\n");
-
- // h0 and h1 use the same context
- GUARD(init_aes_ctr_prf_state(&h_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
-
- // sigma0/1/2 use the same context.
- GUARD(init_aes_ctr_prf_state(&s_prf_state, MAX_AES_INVOKATION, &seeds.seed[2]));
-
- GUARD(generate_sparse_rep((uint64_t *)&p_sk[0], l_sk.wlist[0].val, DV, R_BITS,
- sizeof(p_sk[0]), &h_prf_state));
-
- // Sample the sigmas
- GUARD(sample_uniform_r_bits_with_fixed_prf_context(&l_sk.sigma0, &s_prf_state,
- NO_RESTRICTION));
- GUARD(sample_uniform_r_bits_with_fixed_prf_context(&l_sk.sigma1, &s_prf_state,
- NO_RESTRICTION));
-
- GUARD(generate_sparse_rep((uint64_t *)&p_sk[1], l_sk.wlist[1].val, DV, R_BITS,
- sizeof(p_sk[1]), &h_prf_state));
-
- // Copy data
- l_sk.bin[0] = p_sk[0].val;
- l_sk.bin[1] = p_sk[1].val;
-
- DMSG(" Calculating the public key.\n");
-
- GUARD(calc_pk(l_pk, &seeds.seed[1], p_sk));
-
- memcpy(sk, &l_sk, sizeof(l_sk));
-
- print("h0: ", (uint64_t *)&l_sk.bin[0], R_BITS);
- print("h1: ", (uint64_t *)&l_sk.bin[1], R_BITS);
- print("h0c:", (uint64_t *)&l_sk.wlist[0], SIZEOF_BITS(compressed_idx_dv_t));
- print("h1c:", (uint64_t *)&l_sk.wlist[1], SIZEOF_BITS(compressed_idx_dv_t));
- print("sigma0: ", (uint64_t *)l_sk.sigma0.raw, R_BITS);
- print("sigma1: ", (uint64_t *)l_sk.sigma1.raw, R_BITS);
-
- DMSG(" Exit crypto_kem_keypair.\n");
-
- return SUCCESS;
-}
-
-// Encapsulate - pk is the public key,
-// ct is a key encapsulation message (ciphertext),
-// ss is the shared secret.
-int
-BIKE1_L1_R2_crypto_kem_enc(OUT unsigned char * ct,
- OUT unsigned char * ss,
- IN const unsigned char *pk)
-{
- DMSG(" Enter crypto_kem_enc.\n");
-
- // Convert to the types that are used by this implementation
- const pk_t *l_pk = (const pk_t *)pk;
- ct_t * l_ct = (ct_t *)ct;
- ss_t * l_ss = (ss_t *)ss;
-
- notnull_check(pk);
- notnull_check(ct);
- notnull_check(ss);
-
- // For NIST DRBG_CTR
- DEFER_CLEANUP(seeds_t seeds = {0}, seeds_cleanup);
-
- // Get the entropy seeds.
- GUARD(get_seeds(&seeds));
-
- DMSG(" Encrypting.\n");
- // In fact, seed[0] should be used.
- // Here, we stay consistent with BIKE's reference code
- // that chooses the second seed.
- DEFER_CLEANUP(split_e_t mf, split_e_cleanup);
- GUARD(encrypt(l_ct, &mf, l_pk, &seeds.seed[1]));
-
- DMSG(" Generating shared secret.\n");
- get_ss(l_ss, &mf.val[0], &mf.val[1], l_ct);
-
- print("ss: ", (uint64_t *)l_ss->raw, SIZEOF_BITS(*l_ss));
- DMSG(" Exit crypto_kem_enc.\n");
- return SUCCESS;
-}
-
-// Decapsulate - ct is a key encapsulation message (ciphertext),
-// sk is the private key,
-// ss is the shared secret
-int
-BIKE1_L1_R2_crypto_kem_dec(OUT unsigned char * ss,
- IN const unsigned char *ct,
- IN const unsigned char *sk)
-{
- DMSG(" Enter crypto_kem_dec.\n");
-
- // Convert to the types used by this implementation
- const ct_t *l_ct = (const ct_t *)ct;
- ss_t * l_ss = (ss_t *)ss;
- notnull_check(sk);
- notnull_check(ct);
- notnull_check(ss);
-
- DEFER_CLEANUP(ALIGN(8) sk_t l_sk, sk_cleanup);
- memcpy(&l_sk, sk, sizeof(l_sk));
-
- // Force zero initialization.
- DEFER_CLEANUP(syndrome_t syndrome = {0}, syndrome_cleanup);
- DEFER_CLEANUP(split_e_t e, split_e_cleanup);
-
- DMSG(" Computing s.\n");
- GUARD(compute_syndrome(&syndrome, l_ct, &l_sk));
-
- DMSG(" Decoding.\n");
- uint32_t dec_ret = decode(&e, &syndrome, l_ct, &l_sk) != SUCCESS ? 0 : 1;
-
- DEFER_CLEANUP(split_e_t e2, split_e_cleanup);
- DEFER_CLEANUP(pad_ct_t ce, pad_ct_cleanup);
- GUARD(reencrypt(ce, &e2, &e, l_ct));
-
- // Check if the decoding is successful.
- // Check if the error weight equals T1.
- // Check if (e0', e1') == (e0'', e1'').
- volatile uint32_t success_cond;
- success_cond = dec_ret;
- success_cond &= secure_cmp32(T1, r_bits_vector_weight(&e.val[0]) +
- r_bits_vector_weight(&e.val[1]));
- success_cond &= secure_cmp((uint8_t *)&e, (uint8_t *)&e2, sizeof(e));
-
- ss_t ss_succ = {0};
- ss_t ss_fail = {0};
-
- get_ss(&ss_succ, &ce[0].val, &ce[1].val, l_ct);
- get_ss(&ss_fail, &l_sk.sigma0, &l_sk.sigma1, l_ct);
-
- uint8_t mask = ~secure_l32_mask(0, success_cond);
- for(uint32_t i = 0; i < sizeof(*l_ss); i++)
- {
- l_ss->raw[i] = (mask & ss_succ.raw[i]) | (~mask & ss_fail.raw[i]);
- }
-
- DMSG(" Exit crypto_kem_dec.\n");
- return SUCCESS;
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron, and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com, dkostic@amazon.com)
+ */
+
+#include "decode.h"
+#include "gf2x.h"
+#include "sampling.h"
+#include "sha.h"
+#include "tls/s2n_kem.h"
+
+_INLINE_ void
+split_e(OUT split_e_t *splitted_e, IN const e_t *e)
+{
+ // Copy lower bytes (e0)
+ memcpy(splitted_e->val[0].raw, e->raw, R_SIZE);
+
+ // Now load second value
+ for(uint32_t i = R_SIZE; i < N_SIZE; ++i)
+ {
+ splitted_e->val[1].raw[i - R_SIZE] =
+ ((e->raw[i] << LAST_R_BYTE_TRAIL) | (e->raw[i - 1] >> LAST_R_BYTE_LEAD));
+ }
+
+ // Fix corner case
+ if(N_SIZE < (2ULL * R_SIZE))
+ {
+ splitted_e->val[1].raw[R_SIZE - 1] = (e->raw[N_SIZE - 1] >> LAST_R_BYTE_LEAD);
+ }
+
+ // Fix last value
+ splitted_e->val[0].raw[R_SIZE - 1] &= LAST_R_BYTE_MASK;
+ splitted_e->val[1].raw[R_SIZE - 1] &= LAST_R_BYTE_MASK;
+}
+
+_INLINE_ void
+translate_hash_to_ss(OUT ss_t *ss, IN sha_hash_t *hash)
+{
+ bike_static_assert(sizeof(*hash) >= sizeof(*ss), hash_size_lt_ss_size);
+ memcpy(ss->raw, hash->u.raw, sizeof(*ss));
+}
+
+_INLINE_ void
+translate_hash_to_seed(OUT seed_t *seed, IN sha_hash_t *hash)
+{
+ bike_static_assert(sizeof(*hash) >= sizeof(*seed), hash_size_lt_seed_size);
+ memcpy(seed->raw, hash->u.raw, sizeof(*seed));
+}
+
+_INLINE_ ret_t
+calc_pk(OUT pk_t *pk, IN const seed_t *g_seed, IN const pad_sk_t p_sk)
+{
+ // PK is dbl padded because modmul requires some scratch space for the
+ // multiplication result
+ dbl_pad_pk_t p_pk = {0};
+
+ // Initialize the padding to zero
+ DEFER_CLEANUP(padded_r_t g = {0}, padded_r_cleanup);
+
+ GUARD(sample_uniform_r_bits(&g.val, g_seed, MUST_BE_ODD));
+
+ // Calculate (g0, g1) = (g*h1, g*h0)
+ GUARD(gf2x_mod_mul((uint64_t *)&p_pk[0], (const uint64_t *)&g,
+ (const uint64_t *)&p_sk[1]));
+ GUARD(gf2x_mod_mul((uint64_t *)&p_pk[1], (const uint64_t *)&g,
+ (const uint64_t *)&p_sk[0]));
+
+ // Copy the data to the output parameters.
+ pk->val[0] = p_pk[0].val;
+ pk->val[1] = p_pk[1].val;
+
+ print("g: ", (const uint64_t *)g.val.raw, R_BITS);
+ print("g0: ", (const uint64_t *)&p_pk[0], R_BITS);
+ print("g1: ", (uint64_t *)&p_pk[1], R_BITS);
+
+ return SUCCESS;
+}
+
+// The function H is required by the BIKE-1 Round 2 variant. It uses the
+// extract-then-expand paradigm, based on SHA384 and AES256-CTR PRNG, to produce
+// e from (m*f0, m*f1):
+_INLINE_ ret_t
+function_h(OUT split_e_t *splitted_e, IN const r_t *in0, IN const r_t *in1)
+{
+ DEFER_CLEANUP(generic_param_n_t tmp, generic_param_n_cleanup);
+ DEFER_CLEANUP(sha_hash_t hash_seed = {0}, sha_hash_cleanup);
+ DEFER_CLEANUP(seed_t seed_for_hash, seed_cleanup);
+ DEFER_CLEANUP(aes_ctr_prf_state_t prf_state = {0}, finalize_aes_ctr_prf);
+
+ tmp.val[0] = *in0;
+ tmp.val[1] = *in1;
+
+ // Hash (m*f0, m*f1) to generate a seed:
+ sha(&hash_seed, sizeof(tmp), (uint8_t *)&tmp);
+
+ // Format the seed as a 32-byte input:
+ translate_hash_to_seed(&seed_for_hash, &hash_seed);
+
+ // Use the seed to generate a sparse error vector e:
+ DMSG(" Generating random error.\n");
+ GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, &seed_for_hash));
+
+ DEFER_CLEANUP(padded_e_t e, padded_e_cleanup);
+ DEFER_CLEANUP(ALIGN(8) compressed_idx_t_t dummy, compressed_idx_t_cleanup);
+
+ GUARD(generate_sparse_rep((uint64_t *)&e, dummy.val, T1, N_BITS, sizeof(e),
+ &prf_state));
+ split_e(splitted_e, &e.val);
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t
+encrypt(OUT ct_t *ct, OUT split_e_t *mf, IN const pk_t *pk, IN const seed_t *seed)
+{
+ DEFER_CLEANUP(padded_r_t m = {0}, padded_r_cleanup);
+
+ DMSG(" Sampling m.\n");
+ GUARD(sample_uniform_r_bits(&m.val, seed, NO_RESTRICTION));
+
+ // Pad the public key
+ pad_pk_t p_pk = {0};
+ p_pk[0].val = pk->val[0];
+ p_pk[1].val = pk->val[1];
+
+ // Pad the ciphertext
+ pad_ct_t p_ct = {0};
+ p_ct[0].val = ct->val[0];
+ p_ct[1].val = ct->val[1];
+
+ DEFER_CLEANUP(dbl_pad_ct_t mf_int = {0}, dbl_pad_ct_cleanup);
+
+ DMSG(" Computing m*f0 and m*f1.\n");
+ GUARD(
+ gf2x_mod_mul((uint64_t *)&mf_int[0], (uint64_t *)&m, (uint64_t *)&p_pk[0]));
+ GUARD(
+ gf2x_mod_mul((uint64_t *)&mf_int[1], (uint64_t *)&m, (uint64_t *)&p_pk[1]));
+
+ DEFER_CLEANUP(split_e_t splitted_e, split_e_cleanup);
+
+ DMSG(" Computing the hash function e <- H(m*f0, m*f1).\n");
+ GUARD(function_h(&splitted_e, &mf_int[0].val, &mf_int[1].val));
+
+ DMSG("  Adding the error to the ciphertext.\n");
+ GUARD(gf2x_add(p_ct[0].val.raw, mf_int[0].val.raw, splitted_e.val[0].raw,
+ R_SIZE));
+ GUARD(gf2x_add(p_ct[1].val.raw, mf_int[1].val.raw, splitted_e.val[1].raw,
+ R_SIZE));
+
+ // Copy the data to the output parameters.
+ ct->val[0] = p_ct[0].val;
+ ct->val[1] = p_ct[1].val;
+
+ // Copy the internal mf to the output parameters.
+ mf->val[0] = mf_int[0].val;
+ mf->val[1] = mf_int[1].val;
+
+ print("e0: ", (uint64_t *)splitted_e.val[0].raw, R_BITS);
+ print("e1: ", (uint64_t *)splitted_e.val[1].raw, R_BITS);
+ print("c0: ", (uint64_t *)p_ct[0].val.raw, R_BITS);
+ print("c1: ", (uint64_t *)p_ct[1].val.raw, R_BITS);
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t
+reencrypt(OUT pad_ct_t ce,
+ OUT split_e_t *e2,
+ IN const split_e_t *e,
+ IN const ct_t *l_ct)
+{
+ // Compute (c0 + e0') and (c1 + e1')
+ GUARD(gf2x_add(ce[0].val.raw, l_ct->val[0].raw, e->val[0].raw, R_SIZE));
+ GUARD(gf2x_add(ce[1].val.raw, l_ct->val[1].raw, e->val[1].raw, R_SIZE));
+
+ // (e0'', e1'') <-- H(c0 + e0', c1 + e1')
+ GUARD(function_h(e2, &ce[0].val, &ce[1].val));
+
+ return SUCCESS;
+}
+
+// Generate the Shared Secret K(mf0, mf1, c) by either
+// K(c0+e0', c1+e1', c) or K(sigma0, sigma1, c)
+_INLINE_ void
+get_ss(OUT ss_t *out, IN const r_t *in0, IN const r_t *in1, IN const ct_t *ct)
+{
+ DMSG(" Enter get_ss.\n");
+
+ uint8_t tmp[4 * R_SIZE];
+ memcpy(tmp, in0->raw, R_SIZE);
+ memcpy(tmp + R_SIZE, in1->raw, R_SIZE);
+ memcpy(tmp + 2 * R_SIZE, ct, sizeof(*ct));
+
+ // Calculate the hash digest
+ DEFER_CLEANUP(sha_hash_t hash = {0}, sha_hash_cleanup);
+ sha(&hash, sizeof(tmp), tmp);
+
+ // Truncate the resulting digest, to produce the key K, by copying only the
+ // desired number of LSBs.
+ translate_hash_to_ss(out, &hash);
+
+ secure_clean(tmp, sizeof(tmp));
+ DMSG(" Exit get_ss.\n");
+}
+////////////////////////////////////////////////////////////////////////////////
+// The three APIs below (keypair, encapsulate, decapsulate) are defined by NIST:
+////////////////////////////////////////////////////////////////////////////////
+int
+BIKE1_L1_R2_crypto_kem_keypair(OUT unsigned char *pk, OUT unsigned char *sk)
+{
+ notnull_check(sk);
+ notnull_check(pk);
+
+ // Convert to this implementation's types
+ pk_t *l_pk = (pk_t *)pk;
+ DEFER_CLEANUP(ALIGN(8) sk_t l_sk = {0}, sk_cleanup);
+
+ // For DRBG and AES_PRF
+ DEFER_CLEANUP(seeds_t seeds = {0}, seeds_cleanup);
+ DEFER_CLEANUP(aes_ctr_prf_state_t h_prf_state = {0}, aes_ctr_prf_state_cleanup);
+
+ // For sigma0/1/2
+ DEFER_CLEANUP(aes_ctr_prf_state_t s_prf_state = {0}, aes_ctr_prf_state_cleanup);
+
+ // Padded for internal use only (the padded data is not released).
+ DEFER_CLEANUP(pad_sk_t p_sk = {0}, pad_sk_cleanup);
+
+ // Get the entropy seeds.
+ GUARD(get_seeds(&seeds));
+
+ DMSG(" Enter crypto_kem_keypair.\n");
+ DMSG(" Calculating the secret key.\n");
+
+ // h0 and h1 use the same context
+ GUARD(init_aes_ctr_prf_state(&h_prf_state, MAX_AES_INVOKATION, &seeds.seed[0]));
+
+ // sigma0/1/2 use the same context.
+ GUARD(init_aes_ctr_prf_state(&s_prf_state, MAX_AES_INVOKATION, &seeds.seed[2]));
+
+ GUARD(generate_sparse_rep((uint64_t *)&p_sk[0], l_sk.wlist[0].val, DV, R_BITS,
+ sizeof(p_sk[0]), &h_prf_state));
+
+ // Sample the sigmas
+ GUARD(sample_uniform_r_bits_with_fixed_prf_context(&l_sk.sigma0, &s_prf_state,
+ NO_RESTRICTION));
+ GUARD(sample_uniform_r_bits_with_fixed_prf_context(&l_sk.sigma1, &s_prf_state,
+ NO_RESTRICTION));
+
+ GUARD(generate_sparse_rep((uint64_t *)&p_sk[1], l_sk.wlist[1].val, DV, R_BITS,
+ sizeof(p_sk[1]), &h_prf_state));
+
+ // Copy data
+ l_sk.bin[0] = p_sk[0].val;
+ l_sk.bin[1] = p_sk[1].val;
+
+ DMSG(" Calculating the public key.\n");
+
+ GUARD(calc_pk(l_pk, &seeds.seed[1], p_sk));
+
+ memcpy(sk, &l_sk, sizeof(l_sk));
+
+ print("h0: ", (uint64_t *)&l_sk.bin[0], R_BITS);
+ print("h1: ", (uint64_t *)&l_sk.bin[1], R_BITS);
+ print("h0c:", (uint64_t *)&l_sk.wlist[0], SIZEOF_BITS(compressed_idx_dv_t));
+ print("h1c:", (uint64_t *)&l_sk.wlist[1], SIZEOF_BITS(compressed_idx_dv_t));
+ print("sigma0: ", (uint64_t *)l_sk.sigma0.raw, R_BITS);
+ print("sigma1: ", (uint64_t *)l_sk.sigma1.raw, R_BITS);
+
+ DMSG(" Exit crypto_kem_keypair.\n");
+
+ return SUCCESS;
+}
+
+// Encapsulate - pk is the public key,
+// ct is a key encapsulation message (ciphertext),
+// ss is the shared secret.
+int
+BIKE1_L1_R2_crypto_kem_enc(OUT unsigned char * ct,
+ OUT unsigned char * ss,
+ IN const unsigned char *pk)
+{
+ DMSG(" Enter crypto_kem_enc.\n");
+
+ // Convert to the types that are used by this implementation
+ const pk_t *l_pk = (const pk_t *)pk;
+ ct_t * l_ct = (ct_t *)ct;
+ ss_t * l_ss = (ss_t *)ss;
+
+ notnull_check(pk);
+ notnull_check(ct);
+ notnull_check(ss);
+
+ // For NIST DRBG_CTR
+ DEFER_CLEANUP(seeds_t seeds = {0}, seeds_cleanup);
+
+ // Get the entropy seeds.
+ GUARD(get_seeds(&seeds));
+
+ DMSG(" Encrypting.\n");
+ // In fact, seed[0] should be used.
+ // Here, we stay consistent with BIKE's reference code
+ // that chooses the second seed.
+ DEFER_CLEANUP(split_e_t mf, split_e_cleanup);
+ GUARD(encrypt(l_ct, &mf, l_pk, &seeds.seed[1]));
+
+ DMSG(" Generating shared secret.\n");
+ get_ss(l_ss, &mf.val[0], &mf.val[1], l_ct);
+
+ print("ss: ", (uint64_t *)l_ss->raw, SIZEOF_BITS(*l_ss));
+ DMSG(" Exit crypto_kem_enc.\n");
+ return SUCCESS;
+}
+
+// Decapsulate - ct is a key encapsulation message (ciphertext),
+// sk is the private key,
+// ss is the shared secret
+int
+BIKE1_L1_R2_crypto_kem_dec(OUT unsigned char * ss,
+ IN const unsigned char *ct,
+ IN const unsigned char *sk)
+{
+ DMSG(" Enter crypto_kem_dec.\n");
+
+ // Convert to the types used by this implementation
+ const ct_t *l_ct = (const ct_t *)ct;
+ ss_t * l_ss = (ss_t *)ss;
+ notnull_check(sk);
+ notnull_check(ct);
+ notnull_check(ss);
+
+ DEFER_CLEANUP(ALIGN(8) sk_t l_sk, sk_cleanup);
+ memcpy(&l_sk, sk, sizeof(l_sk));
+
+ // Force zero initialization.
+ DEFER_CLEANUP(syndrome_t syndrome = {0}, syndrome_cleanup);
+ DEFER_CLEANUP(split_e_t e, split_e_cleanup);
+
+ DMSG(" Computing s.\n");
+ GUARD(compute_syndrome(&syndrome, l_ct, &l_sk));
+
+ DMSG(" Decoding.\n");
+ uint32_t dec_ret = decode(&e, &syndrome, l_ct, &l_sk) != SUCCESS ? 0 : 1;
+
+ DEFER_CLEANUP(split_e_t e2, split_e_cleanup);
+ DEFER_CLEANUP(pad_ct_t ce, pad_ct_cleanup);
+ GUARD(reencrypt(ce, &e2, &e, l_ct));
+
+ // Check if the decoding is successful.
+ // Check if the error weight equals T1.
+ // Check if (e0', e1') == (e0'', e1'').
+ volatile uint32_t success_cond;
+ success_cond = dec_ret;
+ success_cond &= secure_cmp32(T1, r_bits_vector_weight(&e.val[0]) +
+ r_bits_vector_weight(&e.val[1]));
+ success_cond &= secure_cmp((uint8_t *)&e, (uint8_t *)&e2, sizeof(e));
+
+ ss_t ss_succ = {0};
+ ss_t ss_fail = {0};
+
+ get_ss(&ss_succ, &ce[0].val, &ce[1].val, l_ct);
+ get_ss(&ss_fail, &l_sk.sigma0, &l_sk.sigma1, l_ct);
+
+ uint8_t mask = ~secure_l32_mask(0, success_cond);
+ for(uint32_t i = 0; i < sizeof(*l_ss); i++)
+ {
+ l_ss->raw[i] = (mask & ss_succ.raw[i]) | (~mask & ss_fail.raw[i]);
+ }
+
+ DMSG(" Exit crypto_kem_dec.\n");
+ return SUCCESS;
+}
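Taken together, the three entry points support the usual KEM round trip. A test sketch; the byte-buffer sizes are taken from the internal types that the functions cast their arguments to (the concrete values are fixed by types.h, which this diff does not show):

#include <string.h>

static int kem_round_trip(void)
{
    unsigned char sk[sizeof(sk_t)]     = {0};
    unsigned char pk[sizeof(pk_t)]     = {0};
    unsigned char ct[sizeof(ct_t)]     = {0};
    unsigned char ss_enc[sizeof(ss_t)] = {0};
    unsigned char ss_dec[sizeof(ss_t)] = {0};

    if(BIKE1_L1_R2_crypto_kem_keypair(pk, sk) != SUCCESS) { return -1; }
    if(BIKE1_L1_R2_crypto_kem_enc(ct, ss_enc, pk) != SUCCESS) { return -1; }
    if(BIKE1_L1_R2_crypto_kem_dec(ss_dec, ct, sk) != SUCCESS) { return -1; }

    // Decapsulation does not report decoding failures through its return
    // value; it constant-time-selects K(sigma0, sigma1, ct) instead, so a
    // failure surfaces only as a shared-secret mismatch.
    return (0 == memcmp(ss_enc, ss_dec, sizeof(ss_enc))) ? 0 : -1;
}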
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/cleanup.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/cleanup.h
index 6bacfaa45a..67205216d3 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/cleanup.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/cleanup.h
@@ -1,131 +1,131 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-#include "types.h"
-#include "utils/s2n_safety.h"
-
-_INLINE_ void
-secure_clean(OUT uint8_t *p, IN const uint32_t len)
-{
-#ifdef _WIN32
- SecureZeroMemory(p, len);
-#else
- typedef void *(*memset_t)(void *, int, size_t);
- static volatile memset_t memset_func = memset;
- memset_func(p, 0, len);
-#endif
-}
-
-_INLINE_ void
-r_cleanup(IN OUT r_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-e_cleanup(IN OUT e_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-padded_r_cleanup(IN OUT padded_r_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-padded_e_cleanup(IN OUT padded_e_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-split_e_cleanup(IN OUT split_e_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-sk_cleanup(IN OUT sk_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-pad_sk_cleanup(IN OUT pad_sk_t *o)
-{
- secure_clean((uint8_t *)o[0], sizeof(*o));
-}
-
-_INLINE_ void
-pad_ct_cleanup(IN OUT pad_ct_t *o)
-{
- secure_clean((uint8_t *)o[0], sizeof(*o));
-}
-
-_INLINE_ void
-dbl_pad_ct_cleanup(IN OUT dbl_pad_ct_t *o)
-{
- secure_clean((uint8_t *)o[0], sizeof(*o));
-}
-
-_INLINE_ void
-seed_cleanup(IN OUT seed_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-syndrome_cleanup(IN OUT syndrome_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-dbl_pad_syndrome_cleanup(IN OUT dbl_pad_syndrome_t *o)
-{
- secure_clean((uint8_t *)o[0], sizeof(*o));
-}
-
-_INLINE_ void
-compressed_idx_t_cleanup(IN OUT compressed_idx_t_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-compressed_idx_dv_ar_cleanup(IN OUT compressed_idx_dv_ar_t *o)
-{
- for(int i = 0; i < N0; i++)
- {
- secure_clean((uint8_t *)&(*o)[i], sizeof((*o)[0]));
- }
-}
-
-_INLINE_ void
-generic_param_n_cleanup(IN OUT generic_param_n_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
-
-_INLINE_ void
-seeds_cleanup(IN OUT seeds_t *o)
-{
- for(int i = 0; i < NUM_OF_SEEDS; i++)
- {
- seed_cleanup(&(o->seed[i]));
- }
-}
-
-_INLINE_ void
-upc_cleanup(IN OUT upc_t *o)
-{
- secure_clean((uint8_t *)o, sizeof(*o));
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+#include "types.h"
+#include "utils/s2n_safety.h"
+
+_INLINE_ void
+secure_clean(OUT uint8_t *p, IN const uint32_t len)
+{
+#ifdef _WIN32
+ SecureZeroMemory(p, len);
+#else
+ typedef void *(*memset_t)(void *, int, size_t);
+ static volatile memset_t memset_func = memset;
+ memset_func(p, 0, len);
+#endif
+}
+
+_INLINE_ void
+r_cleanup(IN OUT r_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+e_cleanup(IN OUT e_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+padded_r_cleanup(IN OUT padded_r_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+padded_e_cleanup(IN OUT padded_e_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+split_e_cleanup(IN OUT split_e_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+sk_cleanup(IN OUT sk_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+pad_sk_cleanup(IN OUT pad_sk_t *o)
+{
+ secure_clean((uint8_t *)o[0], sizeof(*o));
+}
+
+_INLINE_ void
+pad_ct_cleanup(IN OUT pad_ct_t *o)
+{
+ secure_clean((uint8_t *)o[0], sizeof(*o));
+}
+
+_INLINE_ void
+dbl_pad_ct_cleanup(IN OUT dbl_pad_ct_t *o)
+{
+ secure_clean((uint8_t *)o[0], sizeof(*o));
+}
+
+_INLINE_ void
+seed_cleanup(IN OUT seed_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+syndrome_cleanup(IN OUT syndrome_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+dbl_pad_syndrome_cleanup(IN OUT dbl_pad_syndrome_t *o)
+{
+ secure_clean((uint8_t *)o[0], sizeof(*o));
+}
+
+_INLINE_ void
+compressed_idx_t_cleanup(IN OUT compressed_idx_t_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+compressed_idx_dv_ar_cleanup(IN OUT compressed_idx_dv_ar_t *o)
+{
+ for(int i = 0; i < N0; i++)
+ {
+ secure_clean((uint8_t *)&(*o)[i], sizeof((*o)[0]));
+ }
+}
+
+_INLINE_ void
+generic_param_n_cleanup(IN OUT generic_param_n_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
+
+_INLINE_ void
+seeds_cleanup(IN OUT seeds_t *o)
+{
+ for(int i = 0; i < NUM_OF_SEEDS; i++)
+ {
+ seed_cleanup(&(o->seed[i]));
+ }
+}
+
+_INLINE_ void
+upc_cleanup(IN OUT upc_t *o)
+{
+ secure_clean((uint8_t *)o, sizeof(*o));
+}
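The volatile function pointer in secure_clean above is the load-bearing detail: a plain memset of memory that is never read again is a dead store that the optimizer may remove, but the compiler must reload a volatile pointer and cannot prove what it calls, so the wipe survives. The idiom in isolation:

#include <string.h>
#include <stdint.h>

static void wipe(uint8_t *p, size_t len)
{
    // Same pattern as secure_clean: route the call through a volatile
    // pointer so the store cannot be elided as dead.
    typedef void *(*memset_t)(void *, int, size_t);
    static volatile memset_t memset_func = memset;
    memset_func(p, 0, len);
}

void handle_secret(void)
{
    uint8_t key[32] = {0};
    // ... use key ...
    wipe(key, sizeof(key)); // a bare memset here could legally be dropped
}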
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c
index 404c6377da..ee37e7d82a 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.c
@@ -1,365 +1,365 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker, Shay Gueron, and Dusan Kostic,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com, dkostic@amazon.com)
- *
- * [1] The optimizations are based on the description developed in the paper:
- * Drucker, Nir, and Shay Gueron. 2019. “A Toolbox for Software Optimization
- * of QC-MDPC Code-Based Cryptosystems.” Journal of Cryptographic Engineering,
- * January, 1–17. https://doi.org/10.1007/s13389-018-00200-4.
- *
- * [2] The decoder algorithm is the Black-Gray decoder in
- * the early submission of CAKE (due to N. Sandrier and R Misoczki).
- *
- * [3] The analysis for the constant time implementation is given in
- * Drucker, Nir, Shay Gueron, and Dusan Kostic. 2019.
- * “On Constant-Time QC-MDPC Decoding with Negligible Failure Rate.”
- * Cryptology EPrint Archive, 2019. https://eprint.iacr.org/2019/1289.
- *
- * [4] it was adapted to BGF in:
- * Drucker, Nir, Shay Gueron, and Dusan Kostic. 2019.
- * “QC-MDPC decoders with several shades of gray.”
- * Cryptology EPrint Archive, 2019. To be published.
- *
- * [5] Chou, T.: QcBits: Constant-Time Small-Key Code-Based Cryptography.
- * In: Gier-lichs, B., Poschmann, A.Y. (eds.) Cryptographic Hardware
- * and Embedded Systems– CHES 2016. pp. 280–300. Springer Berlin Heidelberg,
- * Berlin, Heidelberg (2016)
- *
- * [6] The rotate512_small function is a derivative of the code described in:
- * Guimarães, Antonio, Diego F Aranha, and Edson Borin. 2019.
- * “Optimized Implementation of QC-MDPC Code-Based Cryptography.”
- * Concurrency and Computation: Practice and Experience 31 (18):
- * e5089. https://doi.org/10.1002/cpe.5089.
- */
-
-#include "decode.h"
-#include "gf2x.h"
-#include "utilities.h"
-#include <string.h>
-
-// Decoding (bit-flipping) parameter
-#ifdef BG_DECODER
-# if(LEVEL == 1)
-# define MAX_IT 3
-# elif(LEVEL == 3)
-# define MAX_IT 4
-# elif(LEVEL == 5)
-# define MAX_IT 7
-# else
-# error "Level can only be 1/3/5"
-# endif
-#elif defined(BGF_DECODER)
-# if(LEVEL == 1)
-# define MAX_IT 5
-# elif(LEVEL == 3)
-# define MAX_IT 6
-# elif(LEVEL == 5)
-# define MAX_IT 7
-# else
-# error "Level can only be 1/3/5"
-# endif
-#endif
-
-// Duplicates the first R_BITS of the syndrome three times
-// |------------------------------------------|
-// | Third copy | Second copy | first R_BITS |
-// |------------------------------------------|
-// This is required by the rotate functions.
-_INLINE_ void
-dup(IN OUT syndrome_t *s)
-{
- s->qw[R_QW - 1] =
- (s->qw[0] << LAST_R_QW_LEAD) | (s->qw[R_QW - 1] & LAST_R_QW_MASK);
-
- for(size_t i = 0; i < (2 * R_QW) - 1; i++)
- {
- s->qw[R_QW + i] =
- (s->qw[i] >> LAST_R_QW_TRAIL) | (s->qw[i + 1] << LAST_R_QW_LEAD);
- }
-}
-
-ret_t
-compute_syndrome(OUT syndrome_t *syndrome, IN const ct_t *ct, IN const sk_t *sk)
-{
- // gf2x_mod_mul requires the values to be 64bit padded and extra (dbl) space
- // for the results
- DEFER_CLEANUP(dbl_pad_syndrome_t pad_s, dbl_pad_syndrome_cleanup);
- DEFER_CLEANUP(pad_sk_t pad_sk = {0}, pad_sk_cleanup);
- pad_sk[0].val = sk->bin[0];
- pad_sk[1].val = sk->bin[1];
-
- DEFER_CLEANUP(pad_ct_t pad_ct = {0}, pad_ct_cleanup);
- pad_ct[0].val = ct->val[0];
- pad_ct[1].val = ct->val[1];
-
- // Compute s = c0*h0 + c1*h1:
- GUARD(gf2x_mod_mul((uint64_t *)&pad_s[0], (uint64_t *)&pad_ct[0],
- (uint64_t *)&pad_sk[0]));
- GUARD(gf2x_mod_mul((uint64_t *)&pad_s[1], (uint64_t *)&pad_ct[1],
- (uint64_t *)&pad_sk[1]));
-
- GUARD(gf2x_add(pad_s[0].val.raw, pad_s[0].val.raw, pad_s[1].val.raw, R_SIZE));
-
- memcpy((uint8_t *)syndrome->qw, pad_s[0].val.raw, R_SIZE);
- dup(syndrome);
-
- return SUCCESS;
-}
-
-_INLINE_ ret_t
-recompute_syndrome(OUT syndrome_t *syndrome,
- IN const ct_t *ct,
- IN const sk_t *sk,
- IN const split_e_t *splitted_e)
-{
- ct_t tmp_ct = *ct;
-
- // Adapt the ciphertext
- GUARD(gf2x_add(tmp_ct.val[0].raw, tmp_ct.val[0].raw, splitted_e->val[0].raw,
- R_SIZE));
- GUARD(gf2x_add(tmp_ct.val[1].raw, tmp_ct.val[1].raw, splitted_e->val[1].raw,
- R_SIZE));
-
- // Recompute the syndrome
- GUARD(compute_syndrome(syndrome, &tmp_ct, sk));
-
- return SUCCESS;
-}
-
-_INLINE_ uint8_t
-get_threshold(IN const syndrome_t *s)
-{
- bike_static_assert(sizeof(*s) >= sizeof(r_t), syndrome_is_large_enough);
-
- const uint32_t syndrome_weight = r_bits_vector_weight((const r_t *)s->qw);
-
- // The equations below are defined in BIKE's specification:
- // https://bikesuite.org/files/round2/spec/BIKE-Spec-Round2.2019.03.30.pdf
- // Page 20 Section 2.4.2
- const uint8_t threshold =
- THRESHOLD_COEFF0 + (THRESHOLD_COEFF1 * syndrome_weight);
-
- DMSG("  Threshold: %d\n", threshold);
- return threshold;
-}
-
-// Use half-adder as described in [5].
-_INLINE_ void
-bit_sliced_adder(OUT upc_t *upc,
- IN OUT syndrome_t *rotated_syndrome,
- IN const size_t num_of_slices)
-{
- // From cache-memory perspective this loop should be the outside loop
- for(size_t j = 0; j < num_of_slices; j++)
- {
- for(size_t i = 0; i < R_QW; i++)
- {
- const uint64_t carry = (upc->slice[j].u.qw[i] & rotated_syndrome->qw[i]);
- upc->slice[j].u.qw[i] ^= rotated_syndrome->qw[i];
- rotated_syndrome->qw[i] = carry;
- }
- }
-}
-
-_INLINE_ void
-bit_slice_full_subtract(OUT upc_t *upc, IN uint8_t val)
-{
- // Borrow
- uint64_t br[R_QW] = {0};
-
- for(size_t j = 0; j < SLICES; j++)
- {
-
- const uint64_t lsb_mask = 0 - (val & 0x1);
- val >>= 1;
-
- // Perform a - b with c as the input/output carry
- // br = 0 0 0 0 1 1 1 1
- // a = 0 0 1 1 0 0 1 1
- // b = 0 1 0 1 0 1 0 1
- // -------------------
- // o = 0 1 1 0 0 1 1 1
- // c = 0 1 0 0 1 1 0 1
- //
- // o = a^b^c
- // _ __ _ _ _ _ _
- // br = abc + abc + abc + abc = abc + ((a+b))c
-
- for(size_t i = 0; i < R_QW; i++)
- {
- const uint64_t a = upc->slice[j].u.qw[i];
- const uint64_t b = lsb_mask;
- const uint64_t tmp = ((~a) & b & (~br[i])) | ((((~a) | b) & br[i]));
- upc->slice[j].u.qw[i] = a ^ b ^ br[i];
- br[i] = tmp;
- }
- }
-}
-
-// Calculate the Unsatisfied Parity Checks (UPCs) and update the errors
-// vector (e) accordingly. In addition, update the black and gray errors vector
-// with the relevant values.
-_INLINE_ void
-find_err1(OUT split_e_t *e,
- OUT split_e_t *black_e,
- OUT split_e_t *gray_e,
- IN const syndrome_t * syndrome,
- IN const compressed_idx_dv_ar_t wlist,
- IN const uint8_t threshold)
-{
- // This function uses the bit-slice-adder methodology of [5]:
- DEFER_CLEANUP(syndrome_t rotated_syndrome = {0}, syndrome_cleanup);
- DEFER_CLEANUP(upc_t upc, upc_cleanup);
-
- for(uint32_t i = 0; i < N0; i++)
- {
- // UPC must start from zero at every iteration
- memset(&upc, 0, sizeof(upc));
-
- // 1) Right-rotate the syndrome for every secret key set bit index
- // Then slice-add it to the UPC array.
- for(size_t j = 0; j < DV; j++)
- {
- rotate_right(&rotated_syndrome, syndrome, wlist[i].val[j]);
- bit_sliced_adder(&upc, &rotated_syndrome, LOG2_MSB(j + 1));
- }
-
- // 2) Subtract the threshold from the UPC counters
- bit_slice_full_subtract(&upc, threshold);
-
- // 3) Update the errors and the black errors vectors.
- // The last slice of the UPC array holds the MSB of the accumulated values
- // minus the threshold. Every zero bit indicates a potential error bit.
- // The errors values are stored in the black array and xored with the
- // errors of the previous iteration.
- const r_t *last_slice = &(upc.slice[SLICES - 1].u.r.val);
- for(size_t j = 0; j < R_SIZE; j++)
- {
- const uint8_t sum_msb = (~last_slice->raw[j]);
- black_e->val[i].raw[j] = sum_msb;
- e->val[i].raw[j] ^= sum_msb;
- }
-
- // Ensure that the padding bits (upper bits of the last byte) are zero so
- // they will not be included in the multiplication and in the hash function.
- e->val[i].raw[R_SIZE - 1] &= LAST_R_BYTE_MASK;
-
- // 4) Calculate the gray error array by adding "DELTA" to the UPC array.
- // For that we reuse the rotated_syndrome variable setting it to all "1".
- for(size_t l = 0; l < DELTA; l++)
- {
- memset((uint8_t *)rotated_syndrome.qw, 0xff, R_SIZE);
- bit_sliced_adder(&upc, &rotated_syndrome, SLICES);
- }
-
- // 5) Update the gray list with the relevant bits that are not
- // set in the black list.
- for(size_t j = 0; j < R_SIZE; j++)
- {
- const uint8_t sum_msb = (~last_slice->raw[j]);
- gray_e->val[i].raw[j] = (~(black_e->val[i].raw[j])) & sum_msb;
- }
- }
-}
-
-// Recalculate the UPCs and update the errors vector (e) according to them
-// and to the black/gray vectors.
-_INLINE_ void
-find_err2(OUT split_e_t *e,
- IN split_e_t *pos_e,
- IN const syndrome_t * syndrome,
- IN const compressed_idx_dv_ar_t wlist,
- IN const uint8_t threshold)
-{
- DEFER_CLEANUP(syndrome_t rotated_syndrome = {0}, syndrome_cleanup);
- DEFER_CLEANUP(upc_t upc, upc_cleanup);
-
- for(uint32_t i = 0; i < N0; i++)
- {
- // UPC must start from zero at every iteration
- memset(&upc, 0, sizeof(upc));
-
- // 1) Right-rotate the syndrome for every secret key set bit index
- // Then slice-add it to the UPC array.
- for(size_t j = 0; j < DV; j++)
- {
- rotate_right(&rotated_syndrome, syndrome, wlist[i].val[j]);
- bit_sliced_adder(&upc, &rotated_syndrome, LOG2_MSB(j + 1));
- }
-
- // 2) Subtract the threshold from the UPC counters
- bit_slice_full_subtract(&upc, threshold);
-
- // 3) Update the errors vector.
- // The last slice of the UPC array holds the MSB of the accumulated values
- // minus the threshold. Every zero bit indicates a potential error bit.
- const r_t *last_slice = &(upc.slice[SLICES - 1].u.r.val);
- for(size_t j = 0; j < R_SIZE; j++)
- {
- const uint8_t sum_msb = (~last_slice->raw[j]);
- e->val[i].raw[j] ^= (pos_e->val[i].raw[j] & sum_msb);
- }
-
- // Ensure that the padding bits (upper bits of the last byte) are zero so
- // they will not be included in the multiplication and in the hash function.
- e->val[i].raw[R_SIZE - 1] &= LAST_R_BYTE_MASK;
- }
-}
-
-ret_t
-decode(OUT split_e_t *e,
- IN const syndrome_t *original_s,
- IN const ct_t *ct,
- IN const sk_t *sk)
-{
- split_e_t black_e = {0};
- split_e_t gray_e = {0};
- syndrome_t s;
-
- // Reset (init) the error because it is XORed in the find_err functions.
- memset(e, 0, sizeof(*e));
- s = *original_s;
- dup(&s);
-
- for(uint32_t iter = 0; iter < MAX_IT; iter++)
- {
- const uint8_t threshold = get_threshold(&s);
-
- DMSG(" Iteration: %d\n", iter);
- DMSG(" Weight of e: %lu\n",
- r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
- DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
-
- find_err1(e, &black_e, &gray_e, &s, sk->wlist, threshold);
- GUARD(recompute_syndrome(&s, ct, sk, e));
-#ifdef BGF_DECODER
- if(iter >= 1)
- {
- continue;
- }
-#endif
- DMSG(" Weight of e: %lu\n",
- r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
- DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
-
- find_err2(e, &black_e, &s, sk->wlist, ((DV + 1) / 2) + 1);
- GUARD(recompute_syndrome(&s, ct, sk, e));
-
- DMSG(" Weight of e: %lu\n",
- r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
- DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
-
- find_err2(e, &gray_e, &s, sk->wlist, ((DV + 1) / 2) + 1);
- GUARD(recompute_syndrome(&s, ct, sk, e));
- }
-
- if(r_bits_vector_weight((r_t *)s.qw) > 0)
- {
- BIKE_ERROR(E_DECODING_FAILURE);
- }
-
- return SUCCESS;
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker, Shay Gueron, and Dusan Kostic,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com, dkostic@amazon.com)
+ *
+ * [1] The optimizations are based on the description developed in the paper:
+ * Drucker, Nir, and Shay Gueron. 2019. “A Toolbox for Software Optimization
+ * of QC-MDPC Code-Based Cryptosystems.” Journal of Cryptographic Engineering,
+ * January, 1–17. https://doi.org/10.1007/s13389-018-00200-4.
+ *
+ * [2] The decoder algorithm is the Black-Gray decoder in
+ * the early submission of CAKE (due to N. Sendrier and R. Misoczki).
+ *
+ * [3] The analysis for the constant time implementation is given in
+ * Drucker, Nir, Shay Gueron, and Dusan Kostic. 2019.
+ * “On Constant-Time QC-MDPC Decoding with Negligible Failure Rate.”
+ * Cryptology EPrint Archive, 2019. https://eprint.iacr.org/2019/1289.
+ *
+ * [4] It was adapted to BGF in:
+ * Drucker, Nir, Shay Gueron, and Dusan Kostic. 2019.
+ * “QC-MDPC decoders with several shades of gray.”
+ * Cryptology EPrint Archive, 2019. To be published.
+ *
+ * [5] Chou, T.: QcBits: Constant-Time Small-Key Code-Based Cryptography.
+ * In: Gierlichs, B., Poschmann, A.Y. (eds.) Cryptographic Hardware
+ * and Embedded Systems – CHES 2016. pp. 280–300. Springer Berlin Heidelberg,
+ * Berlin, Heidelberg (2016)
+ *
+ * [6] The rotate512_small function is a derivative of the code described in:
+ * Guimarães, Antonio, Diego F Aranha, and Edson Borin. 2019.
+ * “Optimized Implementation of QC-MDPC Code-Based Cryptography.”
+ * Concurrency and Computation: Practice and Experience 31 (18):
+ * e5089. https://doi.org/10.1002/cpe.5089.
+ */
+
+#include "decode.h"
+#include "gf2x.h"
+#include "utilities.h"
+#include <string.h>
+
+// Decoding (bit-flipping) parameter
+#ifdef BG_DECODER
+# if(LEVEL == 1)
+# define MAX_IT 3
+# elif(LEVEL == 3)
+# define MAX_IT 4
+# elif(LEVEL == 5)
+# define MAX_IT 7
+# else
+# error "Level can only be 1/3/5"
+# endif
+#elif defined(BGF_DECODER)
+# if(LEVEL == 1)
+# define MAX_IT 5
+# elif(LEVEL == 3)
+# define MAX_IT 6
+# elif(LEVEL == 5)
+# define MAX_IT 7
+# else
+# error "Level can only be 1/3/5"
+# endif
+#endif
+
+// Duplicates the first R_BITS of the syndrome three times
+// |------------------------------------------|
+// | Third copy | Second copy | first R_BITS |
+// |------------------------------------------|
+// This is required by the rotate functions.
+_INLINE_ void
+dup(IN OUT syndrome_t *s)
+{
+ s->qw[R_QW - 1] =
+ (s->qw[0] << LAST_R_QW_LEAD) | (s->qw[R_QW - 1] & LAST_R_QW_MASK);
+
+ for(size_t i = 0; i < (2 * R_QW) - 1; i++)
+ {
+ s->qw[R_QW + i] =
+ (s->qw[i] >> LAST_R_QW_TRAIL) | (s->qw[i + 1] << LAST_R_QW_LEAD);
+ }
+}
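
The triple duplication is what makes rotate_right cheap: any right-rotation of the first R_BITS can then be read as a contiguous window of the duplicated buffer, with no wrap-around logic. Below is a minimal sketch of the same trick on a toy 8-bit ring (a plain uint32_t standing in for syndrome_t; demo only, not part of this codebase):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t r = 8;    // toy ring size (R_BITS stand-in)
    const uint32_t s = 0xB5; // 8-bit "syndrome"
    // Keep three back-to-back copies, as dup() does.
    const uint32_t dup3 = s | (s << r) | (s << (2 * r));

    for(uint32_t k = 0; k < r; k++)
    {
        // Right-rotating by k == reading an 8-bit window at offset k.
        const uint32_t rot = (dup3 >> k) & 0xFF;
        printf("rotate by %u: 0x%02X\n", k, rot);
    }
    return 0;
}
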
+
+ret_t
+compute_syndrome(OUT syndrome_t *syndrome, IN const ct_t *ct, IN const sk_t *sk)
+{
+ // gf2x_mod_mul requires the values to be 64-bit padded, with extra (double)
+ // space for the results
+ DEFER_CLEANUP(dbl_pad_syndrome_t pad_s, dbl_pad_syndrome_cleanup);
+ DEFER_CLEANUP(pad_sk_t pad_sk = {0}, pad_sk_cleanup);
+ pad_sk[0].val = sk->bin[0];
+ pad_sk[1].val = sk->bin[1];
+
+ DEFER_CLEANUP(pad_ct_t pad_ct = {0}, pad_ct_cleanup);
+ pad_ct[0].val = ct->val[0];
+ pad_ct[1].val = ct->val[1];
+
+ // Compute s = c0*h0 + c1*h1:
+ GUARD(gf2x_mod_mul((uint64_t *)&pad_s[0], (uint64_t *)&pad_ct[0],
+ (uint64_t *)&pad_sk[0]));
+ GUARD(gf2x_mod_mul((uint64_t *)&pad_s[1], (uint64_t *)&pad_ct[1],
+ (uint64_t *)&pad_sk[1]));
+
+ GUARD(gf2x_add(pad_s[0].val.raw, pad_s[0].val.raw, pad_s[1].val.raw, R_SIZE));
+
+ memcpy((uint8_t *)syndrome->qw, pad_s[0].val.raw, R_SIZE);
+ dup(syndrome);
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t
+recompute_syndrome(OUT syndrome_t *syndrome,
+ IN const ct_t *ct,
+ IN const sk_t *sk,
+ IN const split_e_t *splitted_e)
+{
+ ct_t tmp_ct = *ct;
+
+ // Adapt the ciphertext
+ GUARD(gf2x_add(tmp_ct.val[0].raw, tmp_ct.val[0].raw, splitted_e->val[0].raw,
+ R_SIZE));
+ GUARD(gf2x_add(tmp_ct.val[1].raw, tmp_ct.val[1].raw, splitted_e->val[1].raw,
+ R_SIZE));
+
+ // Recompute the syndrome
+ GUARD(compute_syndrome(syndrome, &tmp_ct, sk));
+
+ return SUCCESS;
+}
+
+_INLINE_ uint8_t
+get_threshold(IN const syndrome_t *s)
+{
+ bike_static_assert(sizeof(*s) >= sizeof(r_t), syndrome_is_large_enough);
+
+ const uint32_t syndrome_weight = r_bits_vector_weight((const r_t *)s->qw);
+
+ // The equations below are defined in BIKE's specification:
+ // https://bikesuite.org/files/round2/spec/BIKE-Spec-Round2.2019.03.30.pdf
+ // Page 20 Section 2.4.2
+ const uint8_t threshold =
+ THRESHOLD_COEFF0 + (THRESHOLD_COEFF1 * syndrome_weight);
+
+ DMSG(" Threshold: %d\n", threshold);
+ return threshold;
+}
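
So the threshold is an affine function of the syndrome weight. A standalone sketch of the rule follows; the coefficient values are illustrative placeholders, not the THRESHOLD_COEFF constants defined in the parameter headers:

#include <stdint.h>
#include <stdio.h>

// Illustrative placeholders only; the real THRESHOLD_COEFF0/1 are fixed
// per security level by the BIKE round-2 specification.
#define DEMO_THRESHOLD_COEFF0 13.530
#define DEMO_THRESHOLD_COEFF1 0.0069722

static uint8_t demo_get_threshold(uint32_t syndrome_weight)
{
    // Affine threshold rule: T = coeff0 + coeff1 * |s|
    return (uint8_t)(DEMO_THRESHOLD_COEFF0 +
                     (DEMO_THRESHOLD_COEFF1 * syndrome_weight));
}

int main(void)
{
    for(uint32_t w = 0; w <= 4000; w += 1000)
    {
        printf("weight %4u -> threshold %u\n", w, demo_get_threshold(w));
    }
    return 0;
}
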
+
+// Use half-adder as described in [5].
+_INLINE_ void
+bit_sliced_adder(OUT upc_t *upc,
+ IN OUT syndrome_t *rotated_syndrome,
+ IN const size_t num_of_slices)
+{
+ // From a cache-memory perspective, this should be the outer loop
+ for(size_t j = 0; j < num_of_slices; j++)
+ {
+ for(size_t i = 0; i < R_QW; i++)
+ {
+ const uint64_t carry = (upc->slice[j].u.qw[i] & rotated_syndrome->qw[i]);
+ upc->slice[j].u.qw[i] ^= rotated_syndrome->qw[i];
+ rotated_syndrome->qw[i] = carry;
+ }
+ }
+}
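
The UPC counters are stored vertically: slice j holds bit j of every counter, so one half-adder pass updates all R counters at once. A toy version on 8-bit words (toy_sliced_add is a hypothetical demo helper, not from this codebase) that adds a mask three times and reads the counters back:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Toy bit-sliced counters: slice[k] holds bit k of a per-position counter.
// Adding a 0/1 mask ripples half-adder carries across the slices.
static void toy_sliced_add(uint8_t slice[], size_t num_slices, uint8_t mask)
{
    for(size_t k = 0; k < num_slices; k++)
    {
        const uint8_t carry = slice[k] & mask; // half-adder carry
        slice[k] ^= mask;                      // half-adder sum
        mask = carry;                          // propagate to the next slice
    }
}

int main(void)
{
    uint8_t slice[3] = {0, 0, 0};

    // Add the mask 0b00000101 three times: positions 0 and 2 count to 3.
    for(int t = 0; t < 3; t++)
    {
        toy_sliced_add(slice, 3, 0x05);
    }

    for(int pos = 0; pos < 3; pos++)
    {
        const int cnt = (((slice[2] >> pos) & 1) << 2) |
                        (((slice[1] >> pos) & 1) << 1) |
                        ((slice[0] >> pos) & 1);
        printf("counter at position %d = %d\n", pos, cnt);
    }
    return 0;
}
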
+
+_INLINE_ void
+bit_slice_full_subtract(OUT upc_t *upc, IN uint8_t val)
+{
+ // Borrow
+ uint64_t br[R_QW] = {0};
+
+ for(size_t j = 0; j < SLICES; j++)
+ {
+
+ const uint64_t lsb_mask = 0 - (val & 0x1);
+ val >>= 1;
+
+ // Perform a - b with br as the input/output borrow
+ // br = 0 0 0 0 1 1 1 1
+ // a  = 0 0 1 1 0 0 1 1
+ // b  = 0 1 0 1 0 1 0 1
+ // --------------------
+ // o  = 0 1 1 0 1 0 0 1
+ // br'= 0 1 0 0 1 1 0 1
+ //
+ // o   = a ^ b ^ br
+ // br' = ((~a) & b & (~br)) | ((~a) & b & br) | ((~a) & (~b) & br) | (a & b & br)
+ //     = ((~a) & b & (~br)) | (((~a) | b) & br)
+
+ for(size_t i = 0; i < R_QW; i++)
+ {
+ const uint64_t a = upc->slice[j].u.qw[i];
+ const uint64_t b = lsb_mask;
+ const uint64_t tmp = ((~a) & b & (~br[i])) | ((((~a) | b) & br[i]));
+ upc->slice[j].u.qw[i] = a ^ b ^ br[i];
+ br[i] = tmp;
+ }
+ }
+}
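
The truth table and borrow formula above can be checked exhaustively against ordinary integer subtraction; a quick self-contained verification:

#include <stdio.h>

int main(void)
{
    // Check o = a ^ b ^ br and the borrow-out expression used above
    // against the reference a - b - br for all 8 input combinations.
    for(int a = 0; a <= 1; a++)
    {
        for(int b = 0; b <= 1; b++)
        {
            for(int br = 0; br <= 1; br++)
            {
                const int o    = (a ^ b ^ br);
                const int next = ((~a & b & ~br) | ((~a | b) & br)) & 1;
                const int ref  = a - b - br; // borrow out iff ref < 0
                printf("a=%d b=%d br=%d : o=%d (ref %d), br'=%d (ref %d)\n",
                       a, b, br, o, ref & 1, next, ref < 0);
            }
        }
    }
    return 0;
}
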
+
+// Calculate the Unsatisfied Parity Checks (UPCs) and update the errors
+// vector (e) accordingly. In addition, update the black and gray errors
+// vectors with the relevant values.
+_INLINE_ void
+find_err1(OUT split_e_t *e,
+ OUT split_e_t *black_e,
+ OUT split_e_t *gray_e,
+ IN const syndrome_t * syndrome,
+ IN const compressed_idx_dv_ar_t wlist,
+ IN const uint8_t threshold)
+{
+ // This function uses the bit-slice-adder methodology of [5]:
+ DEFER_CLEANUP(syndrome_t rotated_syndrome = {0}, syndrome_cleanup);
+ DEFER_CLEANUP(upc_t upc, upc_cleanup);
+
+ for(uint32_t i = 0; i < N0; i++)
+ {
+ // UPC must start from zero at every iteration
+ memset(&upc, 0, sizeof(upc));
+
+ // 1) Right-rotate the syndrome for every secret key set bit index
+ // Then slice-add it to the UPC array.
+ for(size_t j = 0; j < DV; j++)
+ {
+ rotate_right(&rotated_syndrome, syndrome, wlist[i].val[j]);
+ bit_sliced_adder(&upc, &rotated_syndrome, LOG2_MSB(j + 1));
+ }
+
+ // 2) Subtract the threshold from the UPC counters
+ bit_slice_full_subtract(&upc, threshold);
+
+ // 3) Update the errors and the black errors vectors.
+ // The last slice of the UPC array holds the MSB of the accumulated values
+ // minus the threshold. Every zero bit indicates a potential error bit.
+ // The error values are stored in the black array and XORed with the
+ // errors of the previous iteration.
+ const r_t *last_slice = &(upc.slice[SLICES - 1].u.r.val);
+ for(size_t j = 0; j < R_SIZE; j++)
+ {
+ const uint8_t sum_msb = (~last_slice->raw[j]);
+ black_e->val[i].raw[j] = sum_msb;
+ e->val[i].raw[j] ^= sum_msb;
+ }
+
+ // Ensure that the padding bits (upper bits of the last byte) are zero so
+ // they will not be included in the multiplication and in the hash function.
+ e->val[i].raw[R_SIZE - 1] &= LAST_R_BYTE_MASK;
+
+ // 4) Calculate the gray error array by adding "DELTA" to the UPC array.
+ // For that, we reuse the rotated_syndrome variable, setting it to all "1".
+ for(size_t l = 0; l < DELTA; l++)
+ {
+ memset((uint8_t *)rotated_syndrome.qw, 0xff, R_SIZE);
+ bit_sliced_adder(&upc, &rotated_syndrome, SLICES);
+ }
+
+ // 5) Update the gray list with the relevant bits that are not
+ // set in the black list.
+ for(size_t j = 0; j < R_SIZE; j++)
+ {
+ const uint8_t sum_msb = (~last_slice->raw[j]);
+ gray_e->val[i].raw[j] = (~(black_e->val[i].raw[j])) & sum_msb;
+ }
+ }
+}
+
+// Recalculate the UPCs and update the errors vector (e) according to them
+// and to the black/gray vectors.
+_INLINE_ void
+find_err2(OUT split_e_t *e,
+ IN split_e_t *pos_e,
+ IN const syndrome_t * syndrome,
+ IN const compressed_idx_dv_ar_t wlist,
+ IN const uint8_t threshold)
+{
+ DEFER_CLEANUP(syndrome_t rotated_syndrome = {0}, syndrome_cleanup);
+ DEFER_CLEANUP(upc_t upc, upc_cleanup);
+
+ for(uint32_t i = 0; i < N0; i++)
+ {
+ // UPC must start from zero at every iteration
+ memset(&upc, 0, sizeof(upc));
+
+ // 1) Right-rotate the syndrome for every secret key set bit index
+ // Then slice-add it to the UPC array.
+ for(size_t j = 0; j < DV; j++)
+ {
+ rotate_right(&rotated_syndrome, syndrome, wlist[i].val[j]);
+ bit_sliced_adder(&upc, &rotated_syndrome, LOG2_MSB(j + 1));
+ }
+
+ // 2) Subtract the threshold from the UPC counters
+ bit_slice_full_subtract(&upc, threshold);
+
+ // 3) Update the errors vector.
+ // The last slice of the UPC array holds the MSB of the accumulated values
+ // minus the threshold. Every zero bit indicates a potential error bit.
+ const r_t *last_slice = &(upc.slice[SLICES - 1].u.r.val);
+ for(size_t j = 0; j < R_SIZE; j++)
+ {
+ const uint8_t sum_msb = (~last_slice->raw[j]);
+ e->val[i].raw[j] ^= (pos_e->val[i].raw[j] & sum_msb);
+ }
+
+ // Ensure that the padding bits (upper bits of the last byte) are zero so
+ // they will not be included in the multiplication and in the hash function.
+ e->val[i].raw[R_SIZE - 1] &= LAST_R_BYTE_MASK;
+ }
+}
+
+ret_t
+decode(OUT split_e_t *e,
+ IN const syndrome_t *original_s,
+ IN const ct_t *ct,
+ IN const sk_t *sk)
+{
+ split_e_t black_e = {0};
+ split_e_t gray_e = {0};
+ syndrome_t s;
+
+ // Reset (init) the error because it is XORed in the find_err functions.
+ memset(e, 0, sizeof(*e));
+ s = *original_s;
+ dup(&s);
+
+ for(uint32_t iter = 0; iter < MAX_IT; iter++)
+ {
+ const uint8_t threshold = get_threshold(&s);
+
+ DMSG(" Iteration: %d\n", iter);
+ DMSG(" Weight of e: %lu\n",
+ r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
+ DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
+
+ find_err1(e, &black_e, &gray_e, &s, sk->wlist, threshold);
+ GUARD(recompute_syndrome(&s, ct, sk, e));
+#ifdef BGF_DECODER
+ if(iter >= 1)
+ {
+ continue;
+ }
+#endif
+ DMSG(" Weight of e: %lu\n",
+ r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
+ DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
+
+ find_err2(e, &black_e, &s, sk->wlist, ((DV + 1) / 2) + 1);
+ GUARD(recompute_syndrome(&s, ct, sk, e));
+
+ DMSG(" Weight of e: %lu\n",
+ r_bits_vector_weight(&e->val[0]) + r_bits_vector_weight(&e->val[1]));
+ DMSG(" Weight of syndrome: %lu\n", r_bits_vector_weight((r_t *)s.qw));
+
+ find_err2(e, &gray_e, &s, sk->wlist, ((DV + 1) / 2) + 1);
+ GUARD(recompute_syndrome(&s, ct, sk, e));
+ }
+
+ if(r_bits_vector_weight((r_t *)s.qw) > 0)
+ {
+ BIKE_ERROR(E_DECODING_FAILURE);
+ }
+
+ return SUCCESS;
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.h
index d8809fd829..db7cf8ec1b 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/decode.h
@@ -1,28 +1,28 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "types.h"
-
-ret_t
-compute_syndrome(OUT syndrome_t *syndrome, IN const ct_t *ct, IN const sk_t *sk);
-
-// e should be zeroed before calling the decoder.
-ret_t
-decode(OUT split_e_t *e,
- IN const syndrome_t *s,
- IN const ct_t *ct,
- IN const sk_t *sk);
-
-// Rotate right the first R_BITS of a syndrome.
-// Assumption: the syndrome contains three R_BITS duplications.
-// The output syndrome contains only one R_BITS rotation, the other
-// (2 * R_BITS) bits are undefined.
-void
-rotate_right(OUT syndrome_t *out, IN const syndrome_t *in, IN uint32_t bitscount);
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "types.h"
+
+ret_t
+compute_syndrome(OUT syndrome_t *syndrome, IN const ct_t *ct, IN const sk_t *sk);
+
+// e should be zeroed before calling the decoder.
+ret_t
+decode(OUT split_e_t *e,
+ IN const syndrome_t *s,
+ IN const ct_t *ct,
+ IN const sk_t *sk);
+
+// Rotate right the first R_BITS of a syndrome.
+// Assumption: the syndrome contains three R_BITS duplications.
+// The output syndrome contains only one R_BITS rotation, the other
+// (2 * R_BITS) bits are undefined.
+void
+rotate_right(OUT syndrome_t *out, IN const syndrome_t *in, IN uint32_t bitscount);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/defs.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/defs.h
index 0b74bb1131..c78ee90703 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/defs.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/defs.h
@@ -1,144 +1,144 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-////////////////////////////////////////////
-// Basic defs
-///////////////////////////////////////////
-#define FUNC_PREFIX BIKE1_L1_R2
-#include "functions_renaming.h"
-
-#ifdef __cplusplus
-# define EXTERNC extern "C"
-#else
-# define EXTERNC
-#endif
-
-// For code clarity.
-#define IN
-#define OUT
-
-#define ALIGN(n) __attribute__((aligned(n)))
-#define BIKE_UNUSED(x) (void)(x)
-#define BIKE_UNUSED_ATT __attribute__((unused))
-
-#define _INLINE_ static inline
-
-// In asm, the symbols '==' and '?' are not allowed; therefore, when using
-// DIVIDE_AND_CEIL in asm files, its validity must be ensured with a static_assert
-#if(__cplusplus >= 201103L) || defined(static_assert)
-# define bike_static_assert(COND, MSG) static_assert(COND, "MSG")
-#else
-# define bike_static_assert(COND, MSG) \
- typedef char static_assertion_##MSG[(COND) ? 1 : -1] BIKE_UNUSED_ATT
-#endif
-
-// Divide by the divider and round up to the next integer
-#define DIVIDE_AND_CEIL(x, divider) (((x) + (divider)) / (divider))
-
-#define BIT(len) (1ULL << (len))
-
-#define MASK(len) (BIT(len) - 1)
-#define SIZEOF_BITS(b) (sizeof(b) * 8)
-
-#define QW_SIZE 0x8
-#define XMM_SIZE 0x10
-#define YMM_SIZE 0x20
-#define ZMM_SIZE 0x40
-
-#define ALL_YMM_SIZE (16 * YMM_SIZE)
-#define ALL_ZMM_SIZE (32 * ZMM_SIZE)
-
-// Copied from Kaz's answer at:
-// https://stackoverflow.com/questions/466204/rounding-up-to-next-power-of-2
-#define UPTOPOW2_0(v) ((v)-1)
-#define UPTOPOW2_1(v) (UPTOPOW2_0(v) | (UPTOPOW2_0(v) >> 1))
-#define UPTOPOW2_2(v) (UPTOPOW2_1(v) | (UPTOPOW2_1(v) >> 2))
-#define UPTOPOW2_3(v) (UPTOPOW2_2(v) | (UPTOPOW2_2(v) >> 4))
-#define UPTOPOW2_4(v) (UPTOPOW2_3(v) | (UPTOPOW2_3(v) >> 8))
-#define UPTOPOW2_5(v) (UPTOPOW2_4(v) | (UPTOPOW2_4(v) >> 16))
-
-#define UPTOPOW2(v) (UPTOPOW2_5(v) + 1)
-
-// Works only for 0 < v < 512
-#define LOG2_MSB(v) \
- ((v) == 0 \
- ? 0 \
- : ((v) < 2 \
- ? 1 \
- : ((v) < 4 \
- ? 2 \
- : ((v) < 8 \
- ? 3 \
- : ((v) < 16 \
- ? 4 \
- : ((v) < 32 \
- ? 5 \
- : ((v) < 64 ? 6 \
- : ((v) < 128 \
- ? 7 \
- : ((v) < 256 \
- ? 8 \
- : 9)))))))))
-
-////////////////////////////////////////////
-// Debug
-///////////////////////////////////////////
-
-#ifndef VERBOSE
-# define VERBOSE 0
-#endif
-
-#include <stdio.h>
-
-#if(VERBOSE == 4)
-# define MSG(...) \
- { \
- printf(__VA_ARGS__); \
- }
-# define DMSG(...) MSG(__VA_ARGS__)
-# define EDMSG(...) MSG(__VA_ARGS__)
-# define SEDMSG(...) MSG(__VA_ARGS__)
-#elif(VERBOSE == 3)
-# define MSG(...) \
- { \
- printf(__VA_ARGS__); \
- }
-# define DMSG(...) MSG(__VA_ARGS__)
-# define EDMSG(...) MSG(__VA_ARGS__)
-# define SEDMSG(...)
-#elif(VERBOSE == 2)
-# define MSG(...) \
- { \
- printf(__VA_ARGS__); \
- }
-# define DMSG(...) MSG(__VA_ARGS__)
-# define EDMSG(...)
-# define SEDMSG(...)
-#elif(VERBOSE == 1)
-# define MSG(...) \
- { \
- printf(__VA_ARGS__); \
- }
-# define DMSG(...)
-# define EDMSG(...)
-# define SEDMSG(...)
-#else
-# define MSG(...)
-# define DMSG(...)
-# define EDMSG(...)
-# define SEDMSG(...)
-#endif
-
-////////////////////////////////////////////
-// Printing
-///////////////////////////////////////////
-//#define PRINT_IN_BE
-//#define NO_SPACE
-//#define NO_NEWLINE
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+////////////////////////////////////////////
+// Basic defs
+///////////////////////////////////////////
+#define FUNC_PREFIX BIKE1_L1_R2
+#include "functions_renaming.h"
+
+#ifdef __cplusplus
+# define EXTERNC extern "C"
+#else
+# define EXTERNC
+#endif
+
+// For code clarity.
+#define IN
+#define OUT
+
+#define ALIGN(n) __attribute__((aligned(n)))
+#define BIKE_UNUSED(x) (void)(x)
+#define BIKE_UNUSED_ATT __attribute__((unused))
+
+#define _INLINE_ static inline
+
+// In asm, the symbols '==' and '?' are not allowed; therefore, when using
+// DIVIDE_AND_CEIL in asm files, its validity must be ensured with a static_assert
+#if(__cplusplus >= 201103L) || defined(static_assert)
+# define bike_static_assert(COND, MSG) static_assert(COND, "MSG")
+#else
+# define bike_static_assert(COND, MSG) \
+ typedef char static_assertion_##MSG[(COND) ? 1 : -1] BIKE_UNUSED_ATT
+#endif
+
+// Divide by the divider and round up to the next integer
+#define DIVIDE_AND_CEIL(x, divider) (((x) + (divider)) / (divider))
+
+#define BIT(len) (1ULL << (len))
+
+#define MASK(len) (BIT(len) - 1)
+#define SIZEOF_BITS(b) (sizeof(b) * 8)
+
+#define QW_SIZE 0x8
+#define XMM_SIZE 0x10
+#define YMM_SIZE 0x20
+#define ZMM_SIZE 0x40
+
+#define ALL_YMM_SIZE (16 * YMM_SIZE)
+#define ALL_ZMM_SIZE (32 * ZMM_SIZE)
+
+// Copied from Kaz's answer at:
+// https://stackoverflow.com/questions/466204/rounding-up-to-next-power-of-2
+#define UPTOPOW2_0(v) ((v)-1)
+#define UPTOPOW2_1(v) (UPTOPOW2_0(v) | (UPTOPOW2_0(v) >> 1))
+#define UPTOPOW2_2(v) (UPTOPOW2_1(v) | (UPTOPOW2_1(v) >> 2))
+#define UPTOPOW2_3(v) (UPTOPOW2_2(v) | (UPTOPOW2_2(v) >> 4))
+#define UPTOPOW2_4(v) (UPTOPOW2_3(v) | (UPTOPOW2_3(v) >> 8))
+#define UPTOPOW2_5(v) (UPTOPOW2_4(v) | (UPTOPOW2_4(v) >> 16))
+
+#define UPTOPOW2(v) (UPTOPOW2_5(v) + 1)
+
+// Works only for 0 < v < 512
+#define LOG2_MSB(v) \
+ ((v) == 0 \
+ ? 0 \
+ : ((v) < 2 \
+ ? 1 \
+ : ((v) < 4 \
+ ? 2 \
+ : ((v) < 8 \
+ ? 3 \
+ : ((v) < 16 \
+ ? 4 \
+ : ((v) < 32 \
+ ? 5 \
+ : ((v) < 64 ? 6 \
+ : ((v) < 128 \
+ ? 7 \
+ : ((v) < 256 \
+ ? 8 \
+ : 9)))))))))
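
LOG2_MSB(v) is simply the bit-length of v. The decoder uses it to bound work in bit_sliced_adder: after j+1 unit additions a UPC counter is at most j+1, so only its lowest LOG2_MSB(j+1) slices can change. A runtime equivalent, for illustration only:

#include <stdint.h>
#include <stdio.h>

// Runtime equivalent of LOG2_MSB: the bit-length of v (0 for v == 0).
static uint32_t log2_msb(uint32_t v)
{
    uint32_t bits = 0;
    while(v != 0)
    {
        v >>= 1;
        bits++;
    }
    return bits;
}

int main(void)
{
    // On iteration j the bit-sliced adder only touches log2_msb(j+1) slices.
    for(uint32_t j = 0; j < 8; j++)
    {
        printf("j=%u -> %u slices touched\n", j, log2_msb(j + 1));
    }
    return 0;
}
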
+
+////////////////////////////////////////////
+// Debug
+///////////////////////////////////////////
+
+#ifndef VERBOSE
+# define VERBOSE 0
+#endif
+
+#include <stdio.h>
+
+#if(VERBOSE == 4)
+# define MSG(...) \
+ { \
+ printf(__VA_ARGS__); \
+ }
+# define DMSG(...) MSG(__VA_ARGS__)
+# define EDMSG(...) MSG(__VA_ARGS__)
+# define SEDMSG(...) MSG(__VA_ARGS__)
+#elif(VERBOSE == 3)
+# define MSG(...) \
+ { \
+ printf(__VA_ARGS__); \
+ }
+# define DMSG(...) MSG(__VA_ARGS__)
+# define EDMSG(...) MSG(__VA_ARGS__)
+# define SEDMSG(...)
+#elif(VERBOSE == 2)
+# define MSG(...) \
+ { \
+ printf(__VA_ARGS__); \
+ }
+# define DMSG(...) MSG(__VA_ARGS__)
+# define EDMSG(...)
+# define SEDMSG(...)
+#elif(VERBOSE == 1)
+# define MSG(...) \
+ { \
+ printf(__VA_ARGS__); \
+ }
+# define DMSG(...)
+# define EDMSG(...)
+# define SEDMSG(...)
+#else
+# define MSG(...)
+# define DMSG(...)
+# define EDMSG(...)
+# define SEDMSG(...)
+#endif
+
+////////////////////////////////////////////
+// Printing
+///////////////////////////////////////////
+//#define PRINT_IN_BE
+//#define NO_SPACE
+//#define NO_NEWLINE
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.c
index b048fc06a2..0d8e5b25ce 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.c
@@ -1,11 +1,11 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "error.h"
-
-__thread _bike_err_t bike_errno;
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "error.h"
+
+__thread _bike_err_t bike_errno;
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.h
index eac4e2daee..19d0bb1d9b 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/error.h
@@ -1,36 +1,36 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "defs.h"
-
-#define SUCCESS 0
-#define FAIL (-1)
-
-#define ret_t int
-
-enum _bike_err
-{
- E_ERROR_WEIGHT_IS_NOT_T = 1,
- E_DECODING_FAILURE = 2,
- E_AES_CTR_PRF_INIT_FAIL = 3,
- E_AES_OVER_USED = 4,
- EXTERNAL_LIB_ERROR_OPENSSL = 5,
- E_FAIL_TO_GET_SEED = 6
-};
-
-typedef enum _bike_err _bike_err_t;
-
-extern __thread _bike_err_t bike_errno;
-#define BIKE_ERROR(x) \
- do \
- { \
- bike_errno = (x); \
- return FAIL; \
- } while(0)
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "defs.h"
+
+#define SUCCESS 0
+#define FAIL (-1)
+
+#define ret_t int
+
+enum _bike_err
+{
+ E_ERROR_WEIGHT_IS_NOT_T = 1,
+ E_DECODING_FAILURE = 2,
+ E_AES_CTR_PRF_INIT_FAIL = 3,
+ E_AES_OVER_USED = 4,
+ EXTERNAL_LIB_ERROR_OPENSSL = 5,
+ E_FAIL_TO_GET_SEED = 6
+};
+
+typedef enum _bike_err _bike_err_t;
+
+extern __thread _bike_err_t bike_errno;
+#define BIKE_ERROR(x) \
+ do \
+ { \
+ bike_errno = (x); \
+ return FAIL; \
+ } while(0)
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/functions_renaming.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/functions_renaming.h
index f11aa90e14..09c8385803 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/functions_renaming.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/functions_renaming.h
@@ -1,60 +1,60 @@
-/*
- * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://aws.amazon.com/apache2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- * The license is detailed in the file LICENSE.md, and applies to this file.
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#ifndef __FUNCTIONS_RENAMING_H_INCLUDED__
-#define __FUNCTIONS_RENAMING_H_INCLUDED__
-
-#define PASTER(x, y) x##_##y
-#define EVALUATOR(x, y) PASTER(x, y)
-#define RENAME_FUNC_NAME(fname) EVALUATOR(FUNC_PREFIX, fname)
-
-#define keypair RENAME_FUNC_NAME(keypair)
-#define decaps RENAME_FUNC_NAME(decaps)
-#define encaps RENAME_FUNC_NAME(encaps)
-
-#define aes_ctr_prf RENAME_FUNC_NAME(aes_ctr_prf)
-#define sample_uniform_r_bits_with_fixed_prf_context \
- RENAME_FUNC_NAME(sample_uniform_r_bits_with_fixed_prf_context)
-#define init_aes_ctr_prf_state RENAME_FUNC_NAME(init_aes_ctr_prf_state)
-#define generate_sparse_rep RENAME_FUNC_NAME(generate_sparse_rep)
-#define parallel_hash RENAME_FUNC_NAME(parallel_hash)
-#define decode RENAME_FUNC_NAME(decode)
-#define print_BE RENAME_FUNC_NAME(print_BE)
-#define print_LE RENAME_FUNC_NAME(print_LE)
-#define gf2x_mod_mul RENAME_FUNC_NAME(gf2x_mod_mul)
-#define secure_set_bits RENAME_FUNC_NAME(secure_set_bits)
-#define sha RENAME_FUNC_NAME(sha)
-#define count_ones RENAME_FUNC_NAME(count_ones)
-#define sha_mb RENAME_FUNC_NAME(sha_mb)
-#define split_e RENAME_FUNC_NAME(split_e)
-#define compute_syndrome RENAME_FUNC_NAME(compute_syndrome)
-#define bike_errno RENAME_FUNC_NAME(bike_errno)
-#define cyclic_product RENAME_FUNC_NAME(cyclic_product)
-#define ossl_add RENAME_FUNC_NAME(ossl_add)
-#define karatzuba_add1 RENAME_FUNC_NAME(karatzuba_add1)
-#define karatzuba_add2 RENAME_FUNC_NAME(karatzuba_add2)
-#define gf2x_add RENAME_FUNC_NAME(gf2x_add)
-#define gf2_muladd_4x4 RENAME_FUNC_NAME(gf2_muladd_4x4)
-#define red RENAME_FUNC_NAME(red)
-#define gf2x_mul_1x1 RENAME_FUNC_NAME(gf2x_mul_1x1)
-#define rotate_right RENAME_FUNC_NAME(rotate_right)
-#define r_bits_vector_weight RENAME_FUNC_NAME(r_bits_vector_weight)
-
-#endif //__FUNCTIONS_RENAMING_H_INCLUDED__
+/*
+ * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ * The license is detailed in the file LICENSE.md, and applies to this file.
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#ifndef __FUNCTIONS_RENAMING_H_INCLUDED__
+#define __FUNCTIONS_RENAMING_H_INCLUDED__
+
+#define PASTER(x, y) x##_##y
+#define EVALUATOR(x, y) PASTER(x, y)
+#define RENAME_FUNC_NAME(fname) EVALUATOR(FUNC_PREFIX, fname)
+
+#define keypair RENAME_FUNC_NAME(keypair)
+#define decaps RENAME_FUNC_NAME(decaps)
+#define encaps RENAME_FUNC_NAME(encaps)
+
+#define aes_ctr_prf RENAME_FUNC_NAME(aes_ctr_prf)
+#define sample_uniform_r_bits_with_fixed_prf_context \
+ RENAME_FUNC_NAME(sample_uniform_r_bits_with_fixed_prf_context)
+#define init_aes_ctr_prf_state RENAME_FUNC_NAME(init_aes_ctr_prf_state)
+#define generate_sparse_rep RENAME_FUNC_NAME(generate_sparse_rep)
+#define parallel_hash RENAME_FUNC_NAME(parallel_hash)
+#define decode RENAME_FUNC_NAME(decode)
+#define print_BE RENAME_FUNC_NAME(print_BE)
+#define print_LE RENAME_FUNC_NAME(print_LE)
+#define gf2x_mod_mul RENAME_FUNC_NAME(gf2x_mod_mul)
+#define secure_set_bits RENAME_FUNC_NAME(secure_set_bits)
+#define sha RENAME_FUNC_NAME(sha)
+#define count_ones RENAME_FUNC_NAME(count_ones)
+#define sha_mb RENAME_FUNC_NAME(sha_mb)
+#define split_e RENAME_FUNC_NAME(split_e)
+#define compute_syndrome RENAME_FUNC_NAME(compute_syndrome)
+#define bike_errno RENAME_FUNC_NAME(bike_errno)
+#define cyclic_product RENAME_FUNC_NAME(cyclic_product)
+#define ossl_add RENAME_FUNC_NAME(ossl_add)
+#define karatzuba_add1 RENAME_FUNC_NAME(karatzuba_add1)
+#define karatzuba_add2 RENAME_FUNC_NAME(karatzuba_add2)
+#define gf2x_add RENAME_FUNC_NAME(gf2x_add)
+#define gf2_muladd_4x4 RENAME_FUNC_NAME(gf2_muladd_4x4)
+#define red RENAME_FUNC_NAME(red)
+#define gf2x_mul_1x1 RENAME_FUNC_NAME(gf2x_mul_1x1)
+#define rotate_right RENAME_FUNC_NAME(rotate_right)
+#define r_bits_vector_weight RENAME_FUNC_NAME(r_bits_vector_weight)
+
+#endif //__FUNCTIONS_RENAMING_H_INCLUDED__
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x.h
index 2de0050ff6..7fb1695058 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x.h
@@ -1,55 +1,55 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "types.h"
-
-#ifdef USE_OPENSSL
-# include "openssl_utils.h"
-#endif
-
-#ifdef USE_OPENSSL_GF2M
-// res = a*b mod (x^r - 1)
-// Note: the caller must allocate res with twice the size (it holds the full product).
-_INLINE_ ret_t
-gf2x_mod_mul(OUT uint64_t *res, IN const uint64_t *a, IN const uint64_t *b)
-{
- return cyclic_product((uint8_t *)res, (const uint8_t *)a, (const uint8_t *)b);
-}
-
-// A wrapper for other gf2x_add implementations.
-_INLINE_ ret_t
-gf2x_add(OUT uint8_t *res,
- IN const uint8_t *a,
- IN const uint8_t *b,
- IN const uint64_t size)
-{
- BIKE_UNUSED(size);
- return ossl_add((uint8_t *)res, a, b);
-}
-#else // USE_OPENSSL_GF2M
-
-_INLINE_ ret_t
-gf2x_add(OUT uint8_t *res,
- IN const uint8_t *a,
- IN const uint8_t *b,
- IN const uint64_t bytelen)
-{
- for(uint64_t i = 0; i < bytelen; i++)
- {
- res[i] = a[i] ^ b[i];
- }
- return SUCCESS;
-}
-
-// res = a*b mod (x^r - 1)
-// Note: the caller must allocate res with twice the size (it holds the full product).
-ret_t
-gf2x_mod_mul(OUT uint64_t *res, IN const uint64_t *a, IN const uint64_t *b);
-#endif
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "types.h"
+
+#ifdef USE_OPENSSL
+# include "openssl_utils.h"
+#endif
+
+#ifdef USE_OPENSSL_GF2M
+// res = a*b mod (x^r - 1)
+// Note: the caller must allocate res with twice the size (it holds the full product).
+_INLINE_ ret_t
+gf2x_mod_mul(OUT uint64_t *res, IN const uint64_t *a, IN const uint64_t *b)
+{
+ return cyclic_product((uint8_t *)res, (const uint8_t *)a, (const uint8_t *)b);
+}
+
+// A wrapper for other gf2x_add implementations.
+_INLINE_ ret_t
+gf2x_add(OUT uint8_t *res,
+ IN const uint8_t *a,
+ IN const uint8_t *b,
+ IN const uint64_t size)
+{
+ BIKE_UNUSED(size);
+ return ossl_add((uint8_t *)res, a, b);
+}
+#else // USE_OPENSSL_GF2M
+
+_INLINE_ ret_t
+gf2x_add(OUT uint8_t *res,
+ IN const uint8_t *a,
+ IN const uint8_t *b,
+ IN const uint64_t bytelen)
+{
+ for(uint64_t i = 0; i < bytelen; i++)
+ {
+ res[i] = a[i] ^ b[i];
+ }
+ return SUCCESS;
+}
+
+// res = a*b mod (x^r - 1)
+// Note: the caller must allocate res with twice the size (it holds the full product).
+ret_t
+gf2x_mod_mul(OUT uint64_t *res, IN const uint64_t *a, IN const uint64_t *b);
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_internal.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_internal.h
index 74fc5b9932..779e7f9727 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_internal.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_internal.h
@@ -1,32 +1,32 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "types.h"
-
-EXTERNC void
-karatzuba_add1(OUT uint64_t *res,
- IN const uint64_t *a,
- IN const uint64_t *b,
- IN uint64_t n_half,
- IN uint64_t *alah);
-
-EXTERNC void
-karatzuba_add2(OUT uint64_t *res1,
- OUT uint64_t *res2,
- IN const uint64_t *res,
- IN const uint64_t *tmp,
- IN uint64_t n_half);
-
-EXTERNC void
-red(uint64_t *res);
-
-void
-gf2x_mul_1x1(OUT uint64_t *res, IN uint64_t a, IN uint64_t b);
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "types.h"
+
+EXTERNC void
+karatzuba_add1(OUT uint64_t *res,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN uint64_t n_half,
+ IN uint64_t *alah);
+
+EXTERNC void
+karatzuba_add2(OUT uint64_t *res1,
+ OUT uint64_t *res2,
+ IN const uint64_t *res,
+ IN const uint64_t *tmp,
+ IN uint64_t n_half);
+
+EXTERNC void
+red(uint64_t *res);
+
+void
+gf2x_mul_1x1(OUT uint64_t *res, IN uint64_t a, IN uint64_t b);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_mul.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_mul.c
index 84a79589db..81e55a3366 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_mul.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_mul.c
@@ -1,97 +1,97 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "cleanup.h"
-#include "gf2x.h"
-#include "gf2x_internal.h"
-#include <stdlib.h>
-#include <string.h>
-
-#ifndef USE_OPENSSL_GF2M
-
-// All the temporary data (which might hold secrets)
-// is stored on a secure buffer, so that it can be easily cleaned up later.
-// The secure buffer required is: 3n/2 (alah|blbh|tmp) in a recursive way.
-// 3n/2 + 3n/4 + 3n/8 = 3(n/2 + n/4 + n/8) < 3n
-# define SECURE_BUFFER_SIZE (3 * R_PADDED_SIZE)
-
-// Calculate the number of uint64_t values needed to store SECURE_BUFFER_SIZE bytes, rounding up to the next whole integer.
-# define SECURE_BUFFER_SIZE_64_BIT ((SECURE_BUFFER_SIZE / sizeof(uint64_t)) + ((SECURE_BUFFER_SIZE % sizeof(uint64_t)) != 0))
-
-// This function assumes that n is even.
-_INLINE_ void
-karatzuba(OUT uint64_t *res,
- IN const uint64_t *a,
- IN const uint64_t *b,
- IN const uint64_t n,
- uint64_t * secure_buf)
-{
- if(1 == n)
- {
- gf2x_mul_1x1(res, a[0], b[0]);
- return;
- }
-
- const uint64_t half_n = n >> 1;
-
- // Define pointers to the middle of each parameter,
- // separating a into a_low and a_high (same for b and res)
- const uint64_t *a_high = a + half_n;
- const uint64_t *b_high = b + half_n;
-
- // Divide res into 4 parts res3|res2|res1|res, each of size n/2
- uint64_t *res1 = res + half_n;
- uint64_t *res2 = res1 + half_n;
-
- // All three parameters below are allocated on the secure buffer
- // All of them are in size half n
- uint64_t *alah = secure_buf;
- uint64_t *blbh = alah + half_n;
- uint64_t *tmp = blbh + half_n;
-
- // Place the secure buffer ptr in the first free location,
- // so the recursive function can use it.
- secure_buf = tmp + half_n;
-
- // Calculate Z0 and store the result in res(low)
- karatzuba(res, a, b, half_n, secure_buf);
-
- // Calculate Z2 and store the result in res(high)
- karatzuba(res2, a_high, b_high, half_n, secure_buf);
-
- // Accumulate the results.
- karatzuba_add1(res, a, b, half_n, alah);
-
- // (a_low + a_high)(b_low + b_high) --> res1
- karatzuba(res1, alah, blbh, half_n, secure_buf);
-
- karatzuba_add2(res1, res2, res, tmp, half_n);
-}
-
-ret_t
-gf2x_mod_mul(OUT uint64_t *res, IN const uint64_t *a, IN const uint64_t *b)
-{
- bike_static_assert((R_PADDED_QW % 2 == 0), karatzuba_n_is_odd);
-
- ALIGN(sizeof(uint64_t)) uint64_t secure_buffer[SECURE_BUFFER_SIZE_64_BIT];
-
- /* make sure we have the correct size allocation. */
- bike_static_assert(sizeof(secure_buffer) % sizeof(uint64_t) == 0,
- secure_buffer_not_eligable_for_uint64_t);
-
- karatzuba(res, a, b, R_PADDED_QW, (uint64_t *)secure_buffer);
-
- // This function implicitly assumes that the size of res is 2*R_PADDED_QW.
- red(res);
-
- secure_clean((uint8_t*)secure_buffer, sizeof(secure_buffer));
-
- return SUCCESS;
-}
-
-#endif // USE_OPENSSL_GF2M
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "cleanup.h"
+#include "gf2x.h"
+#include "gf2x_internal.h"
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef USE_OPENSSL_GF2M
+
+// All the temporary data (which might hold secrets)
+// is stored on a secure buffer, so that it can be easily cleaned up later.
+// The secure buffer required is: 3n/2 (alah|blbh|tmp) in a recursive way.
+// 3n/2 + 3n/4 + 3n/8 = 3(n/2 + n/4 + n/8) < 3n
+# define SECURE_BUFFER_SIZE (3 * R_PADDED_SIZE)
+
+// Calculate the number of uint64_t values needed to store SECURE_BUFFER_SIZE bytes, rounding up to the next whole integer.
+# define SECURE_BUFFER_SIZE_64_BIT ((SECURE_BUFFER_SIZE / sizeof(uint64_t)) + ((SECURE_BUFFER_SIZE % sizeof(uint64_t)) != 0))
+
+// This function assumes that n is even.
+_INLINE_ void
+karatzuba(OUT uint64_t *res,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const uint64_t n,
+ uint64_t * secure_buf)
+{
+ if(1 == n)
+ {
+ gf2x_mul_1x1(res, a[0], b[0]);
+ return;
+ }
+
+ const uint64_t half_n = n >> 1;
+
+ // Define pointers to the middle of each parameter,
+ // separating a into a_low and a_high (same for b and res)
+ const uint64_t *a_high = a + half_n;
+ const uint64_t *b_high = b + half_n;
+
+ // Divide res into 4 parts res3|res2|res1|res, each of size n/2
+ uint64_t *res1 = res + half_n;
+ uint64_t *res2 = res1 + half_n;
+
+ // All three parameters below are allocated on the secure buffer,
+ // and each is of size half_n
+ uint64_t *alah = secure_buf;
+ uint64_t *blbh = alah + half_n;
+ uint64_t *tmp = blbh + half_n;
+
+ // Place the secure buffer ptr in the first free location,
+ // so the recursive function can use it.
+ secure_buf = tmp + half_n;
+
+ // Calculate Z0 and store the result in res(low)
+ karatzuba(res, a, b, half_n, secure_buf);
+
+ // Calculate Z2 and store the result in res(high)
+ karatzuba(res2, a_high, b_high, half_n, secure_buf);
+
+ // Accumulate the results.
+ karatzuba_add1(res, a, b, half_n, alah);
+
+ // (a_low + a_high)(b_low + b_high) --> res1
+ karatzuba(res1, alah, blbh, half_n, secure_buf);
+
+ karatzuba_add2(res1, res2, res, tmp, half_n);
+}
+
+ret_t
+gf2x_mod_mul(OUT uint64_t *res, IN const uint64_t *a, IN const uint64_t *b)
+{
+ bike_static_assert((R_PADDED_QW % 2 == 0), karatzuba_n_is_odd);
+
+ ALIGN(sizeof(uint64_t)) uint64_t secure_buffer[SECURE_BUFFER_SIZE_64_BIT];
+
+ /* make sure we have the correct size allocation. */
+ bike_static_assert(sizeof(secure_buffer) % sizeof(uint64_t) == 0,
+ secure_buffer_not_eligable_for_uint64_t);
+
+ karatzuba(res, a, b, R_PADDED_QW, (uint64_t *)secure_buffer);
+
+ // This function implicitly assumes that the size of res is 2*R_PADDED_QW.
+ red(res);
+
+ secure_clean((uint8_t*)secure_buffer, sizeof(secure_buffer));
+
+ return SUCCESS;
+}
+
+#endif // USE_OPENSSL_GF2M
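
The recursion above is the standard three-multiplication Karatsuba identity; in GF(2)[x] the additions are XORs, which is why karatzuba_add1/karatzuba_add2 need no carry handling. The same identity over ordinary integers, as a sketch of the res|res1|res2 layout (demo only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t a = 0x12345678, b = 0x9ABCDEF0;

    // Split both operands into 16-bit halves (the "n/2" of karatzuba()).
    const uint32_t al = a & 0xFFFF, ah = a >> 16;
    const uint32_t bl = b & 0xFFFF, bh = b >> 16;

    const uint64_t z0 = (uint64_t)al * bl; // low*low   -> res (low part)
    const uint64_t z2 = (uint64_t)ah * bh; // high*high -> res2 (high part)
    // One product of the half-sums replaces the two cross products.
    const uint64_t z1 = (uint64_t)(al + ah) * (bl + bh) - z0 - z2;

    const uint64_t kara   = z0 + (z1 << 16) + (z2 << 32);
    const uint64_t direct = (uint64_t)a * b;
    printf("karatsuba %016llx, direct %016llx\n",
           (unsigned long long)kara, (unsigned long long)direct);
    return 0;
}
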
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_portable.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_portable.c
index 1816da6e77..f59361f192 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_portable.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/gf2x_portable.c
@@ -1,108 +1,108 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "gf2x.h"
-#include "utilities.h"
-
-#if !defined(USE_OPENSSL_GF2M)
-
-// The algorithm is based on the windowing method, for example as in:
-// Brent, R. P., Gaudry, P., Thomé, E., & Zimmermann, P. (2008, May), "Faster
-// multiplication in GF (2)[x]". In: International Algorithmic Number Theory
-// Symposium (pp. 153-166). Springer, Berlin, Heidelberg. In this implementation,
-// the last three bits are multiplied using a schoolbook multiplication.
-void
-gf2x_mul_1x1(uint64_t *c, uint64_t a, uint64_t b)
-{
- uint64_t h = 0, l = 0, u[8];
- const uint64_t w = 64;
- const uint64_t s = 3;
- // Multiplying 64 bits by 7 can result in an overflow of 3 bits.
- // Therefore, these bits are masked out, and are treated in step 3.
- const uint64_t b0 = b & 0x1fffffffffffffff;
-
- // Step 1: Calculate a multiplication table with 8 entries.
- u[0] = 0;
- u[1] = b0;
- u[2] = u[1] << 1;
- u[3] = u[2] ^ b0;
- u[4] = u[2] << 1;
- u[5] = u[4] ^ b0;
- u[6] = u[3] << 1;
- u[7] = u[6] ^ b0;
-
- // Step 2: Multiply two elements in parallel, at positions i and i+s
- l = u[a & 7] ^ (u[(a >> 3) & 7] << 3);
- h = (u[(a >> 3) & 7] >> 61);
- for(uint32_t i = (2 * s); i < w; i += (2 * s))
- {
- uint64_t g1 = u[(a >> i) & 7];
- uint64_t g2 = u[(a >> (i + s)) & 7];
-
- l ^= (g1 << i) ^ (g2 << (i + s));
- h ^= (g1 >> (w - i)) ^ (g2 >> (w - (i + s)));
- }
-
- // Step 3: Multiply the last three bits.
- for(uint8_t i = 61; i < 64; i++)
- {
- uint64_t mask = (-((b >> i) & 1));
- l ^= ((a << i) & mask);
- h ^= ((a >> (w - i)) & mask);
- }
-
- c[0] = l;
- c[1] = h;
-}
-
-void
-karatzuba_add1(OUT const uint64_t *res,
- IN const uint64_t *a,
- IN const uint64_t *b,
- IN const uint64_t n_half,
- IN uint64_t *alah)
-{
- for(uint32_t j = 0; j < n_half; j++)
- {
- alah[j + 0 * n_half] = a[j] ^ a[n_half + j];
- alah[j + 1 * n_half] = b[j] ^ b[n_half + j];
- alah[j + 2 * n_half] = res[n_half + j] ^ res[2 * n_half + j];
- }
-}
-
-void
-karatzuba_add2(OUT uint64_t *res1,
- OUT uint64_t *res2,
- IN const uint64_t *res,
- IN const uint64_t *tmp,
- IN const uint64_t n_half)
-{
- for(uint32_t j = 0; j < n_half; j++)
- {
- res1[j] ^= res[j] ^ tmp[j];
- res2[j] ^= res2[n_half + j] ^ tmp[j];
- }
-}
-
-void
-red(uint64_t *a)
-{
- for(uint32_t i = 0; i < R_QW; i++)
- {
- const uint64_t temp0 = a[R_QW + i - 1];
- const uint64_t temp1 = a[R_QW + i];
- a[i] ^= (temp0 >> LAST_R_QW_LEAD) | (temp1 << LAST_R_QW_TRAIL);
- }
-
- a[R_QW - 1] &= LAST_R_QW_MASK;
-
- // Clean the secrets from the upper half of a.
- secure_clean((uint8_t *)&a[R_QW], sizeof(uint64_t) * R_QW);
-}
-
-#endif
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "gf2x.h"
+#include "utilities.h"
+
+#if !defined(USE_OPENSSL_GF2M)
+
+// The algorithm is based on the windowing method, for example as in:
+// Brent, R. P., Gaudry, P., Thomé, E., & Zimmermann, P. (2008, May), "Faster
+// multiplication in GF (2)[x]". In: International Algorithmic Number Theory
+// Symposium (pp. 153-166). Springer, Berlin, Heidelberg. In this implementation,
+// the last three bits are multiplied using a schoolbook multiplication.
+void
+gf2x_mul_1x1(uint64_t *c, uint64_t a, uint64_t b)
+{
+ uint64_t h = 0, l = 0, u[8];
+ const uint64_t w = 64;
+ const uint64_t s = 3;
+ // Multiplying 64 bits by 7 can result in an overflow of 3 bits.
+ // Therefore, these bits are masked out, and are treated in step 3.
+ const uint64_t b0 = b & 0x1fffffffffffffff;
+
+ // Step 1: Calculate a multiplication table with 8 entries.
+ u[0] = 0;
+ u[1] = b0;
+ u[2] = u[1] << 1;
+ u[3] = u[2] ^ b0;
+ u[4] = u[2] << 1;
+ u[5] = u[4] ^ b0;
+ u[6] = u[3] << 1;
+ u[7] = u[6] ^ b0;
+
+ // Step 2: Multiply two elements in parallel, at positions i and i+s
+ l = u[a & 7] ^ (u[(a >> 3) & 7] << 3);
+ h = (u[(a >> 3) & 7] >> 61);
+ for(uint32_t i = (2 * s); i < w; i += (2 * s))
+ {
+ uint64_t g1 = u[(a >> i) & 7];
+ uint64_t g2 = u[(a >> (i + s)) & 7];
+
+ l ^= (g1 << i) ^ (g2 << (i + s));
+ h ^= (g1 >> (w - i)) ^ (g2 >> (w - (i + s)));
+ }
+
+ // Step 3: Multiply the last three bits.
+ for(uint8_t i = 61; i < 64; i++)
+ {
+ uint64_t mask = (-((b >> i) & 1));
+ l ^= ((a << i) & mask);
+ h ^= ((a >> (w - i)) & mask);
+ }
+
+ c[0] = l;
+ c[1] = h;
+}
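
A bit-at-a-time carry-less multiplier is a convenient cross-check for the windowed routine above. The helper below is a hypothetical test aid, not part of this codebase; its output can be compared against gf2x_mul_1x1 on random inputs:

#include <stdint.h>
#include <stdio.h>

// Reference carry-less 64x64 -> 128-bit multiply, one bit of b at a time.
static void clmul_naive(uint64_t c[2], uint64_t a, uint64_t b)
{
    c[0] = 0;
    c[1] = 0;
    for(uint32_t i = 0; i < 64; i++)
    {
        if((b >> i) & 1)
        {
            c[0] ^= (a << i);
            if(i != 0)
            {
                c[1] ^= (a >> (64 - i)); // spill into the high word
            }
        }
    }
}

int main(void)
{
    uint64_t c[2];
    clmul_naive(c, 0x123456789ABCDEF0ULL, 0xFEDCBA9876543210ULL);
    printf("high %016llx, low %016llx\n",
           (unsigned long long)c[1], (unsigned long long)c[0]);
    return 0;
}
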
+
+void
+karatzuba_add1(OUT const uint64_t *res,
+ IN const uint64_t *a,
+ IN const uint64_t *b,
+ IN const uint64_t n_half,
+ IN uint64_t *alah)
+{
+ for(uint32_t j = 0; j < n_half; j++)
+ {
+ alah[j + 0 * n_half] = a[j] ^ a[n_half + j];
+ alah[j + 1 * n_half] = b[j] ^ b[n_half + j];
+ alah[j + 2 * n_half] = res[n_half + j] ^ res[2 * n_half + j];
+ }
+}
+
+void
+karatzuba_add2(OUT uint64_t *res1,
+ OUT uint64_t *res2,
+ IN const uint64_t *res,
+ IN const uint64_t *tmp,
+ IN const uint64_t n_half)
+{
+ for(uint32_t j = 0; j < n_half; j++)
+ {
+ res1[j] ^= res[j] ^ tmp[j];
+ res2[j] ^= res2[n_half + j] ^ tmp[j];
+ }
+}
+
+void
+red(uint64_t *a)
+{
+ for(uint32_t i = 0; i < R_QW; i++)
+ {
+ const uint64_t temp0 = a[R_QW + i - 1];
+ const uint64_t temp1 = a[R_QW + i];
+ a[i] ^= (temp0 >> LAST_R_QW_LEAD) | (temp1 << LAST_R_QW_TRAIL);
+ }
+
+ a[R_QW - 1] &= LAST_R_QW_MASK;
+
+ // Clean the secrets from the upper half of a.
+ secure_clean((uint8_t *)&a[R_QW], sizeof(uint64_t) * R_QW);
+}
+
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c
index 09e0af3fde..a2a97c4651 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.c
@@ -1,187 +1,187 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0"
- *
- * Written by Nir Drucker and Shay Gueron,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "openssl_utils.h"
-#include "utilities.h"
-#include <assert.h>
-#include <openssl/bn.h>
-#include <string.h>
-
-#ifdef USE_OPENSSL_GF2M
-
-# define MAX_OPENSSL_INV_TRIALS 1000
-
-_INLINE_ void
-BN_CTX_cleanup(BN_CTX *ctx)
-{
- if(ctx)
- {
- BN_CTX_end(ctx);
- BN_CTX_free(ctx);
- }
-}
-
-DEFINE_POINTER_CLEANUP_FUNC(BN_CTX *, BN_CTX_cleanup);
-
-// Loading (big) numbers into OpenSSL should use big-endian representation.
-// Therefore, the byte ordering of the number should be reversed.
-_INLINE_ void
-reverse_endian(OUT uint8_t *res, IN const uint8_t *in, IN const uint32_t n)
-{
- uint32_t i;
-
- for(i = 0; i < (n / 2); i++)
- {
- uint64_t tmp = in[i];
- res[i] = in[n - 1 - i];
- res[n - 1 - i] = tmp;
- }
-
- // If the number of bytes is odd, copy the middle byte as well.
- if(n % 2)
- {
- res[i] = in[i];
- }
-}
-
-_INLINE_ ret_t
-ossl_bn2bin(OUT uint8_t *out, IN const BIGNUM *in, IN const uint32_t size)
-{
- assert(size <= N_SIZE);
- uint8_t be_tmp[N_SIZE] = {0};
-
- memset(out, 0, size);
-
- if(BN_bn2bin(in, be_tmp) == -1)
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
- reverse_endian(out, be_tmp, BN_num_bytes(in));
-
- return SUCCESS;
-}
-
-_INLINE_ ret_t
-ossl_bin2bn(OUT BIGNUM *out, IN const uint8_t *in, IN const uint32_t size)
-{
- assert(size <= N_SIZE);
- uint8_t be_tmp[N_SIZE] = {0};
-
- reverse_endian(be_tmp, in, size);
-
- if(BN_bin2bn(be_tmp, size, out) == 0)
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- return SUCCESS;
-}
-
-ret_t
-ossl_add(OUT uint8_t res_bin[R_SIZE],
- IN const uint8_t a_bin[R_SIZE],
- IN const uint8_t b_bin[R_SIZE])
-{
- DEFER_CLEANUP(BN_CTX *bn_ctx = BN_CTX_new(), BN_CTX_cleanup_pointer);
- BIGNUM *r = NULL;
- BIGNUM *a = NULL;
- BIGNUM *b = NULL;
-
- if(NULL == bn_ctx)
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- BN_CTX_start(bn_ctx);
-
- r = BN_CTX_get(bn_ctx);
- a = BN_CTX_get(bn_ctx);
- b = BN_CTX_get(bn_ctx);
-
- if((NULL == r) || (NULL == a) || (NULL == b))
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
- GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
-
- if(BN_GF2m_add(r, a, b) == 0)
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
-
- return SUCCESS;
-}
-
-// Perform a cyclic product by using OpenSSL.
-_INLINE_ ret_t
-ossl_cyclic_product(OUT BIGNUM *r,
- IN const BIGNUM *a,
- IN const BIGNUM *b,
- BN_CTX * bn_ctx)
-{
- BIGNUM *m = BN_CTX_get(bn_ctx);
- if(NULL == m)
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- // m = x^PARAM_R - 1
- if((BN_set_bit(m, R_BITS) == 0) || (BN_set_bit(m, 0) == 0))
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- // r = a*b mod m
- if(BN_GF2m_mod_mul(r, a, b, m, bn_ctx) == 0)
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- return SUCCESS;
-}
-
-// Perform a cyclic product by using OpenSSL.
-ret_t
-cyclic_product(OUT uint8_t res_bin[R_SIZE],
- IN const uint8_t a_bin[R_SIZE],
- IN const uint8_t b_bin[R_SIZE])
-{
- DEFER_CLEANUP(BN_CTX *bn_ctx = BN_CTX_new(), BN_CTX_cleanup_pointer);
- BIGNUM *r = NULL;
- BIGNUM *a = NULL;
- BIGNUM *b = NULL;
-
- if(NULL == bn_ctx)
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- BN_CTX_start(bn_ctx);
-
- r = BN_CTX_get(bn_ctx);
- a = BN_CTX_get(bn_ctx);
- b = BN_CTX_get(bn_ctx);
-
- if((NULL == r) || (NULL == a) || (NULL == b))
- {
- BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
- }
-
- GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
- GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
- GUARD(ossl_cyclic_product(r, a, b, bn_ctx));
- GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
-
- return SUCCESS;
-}
-
-#endif // USE_OPENSSL_GF2M
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0"
+ *
+ * Written by Nir Drucker and Shay Gueron,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "openssl_utils.h"
+#include "utilities.h"
+#include <assert.h>
+#include <openssl/bn.h>
+#include <string.h>
+
+#ifdef USE_OPENSSL_GF2M
+
+# define MAX_OPENSSL_INV_TRIALS 1000
+
+_INLINE_ void
+BN_CTX_cleanup(BN_CTX *ctx)
+{
+ if(ctx)
+ {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ }
+}
+
+DEFINE_POINTER_CLEANUP_FUNC(BN_CTX *, BN_CTX_cleanup);
+
+// Loading (big) numbers into OpenSSL should use big-endian representation.
+// Therefore, the byte ordering of the number should be reversed.
+_INLINE_ void
+reverse_endian(OUT uint8_t *res, IN const uint8_t *in, IN const uint32_t n)
+{
+ uint32_t i;
+
+ for(i = 0; i < (n / 2); i++)
+ {
+ uint64_t tmp = in[i];
+ res[i] = in[n - 1 - i];
+ res[n - 1 - i] = tmp;
+ }
+
+ // If the number of bytes is odd, copy the middle byte as well.
+ if(n % 2)
+ {
+ res[i] = in[i];
+ }
+}
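
reverse_endian is an involution, which gives an easy sanity check: applying it twice must reproduce the input. A quick standalone property test (a demo copy of the routine, using out-of-place buffers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void reverse_endian_demo(uint8_t *res, const uint8_t *in, uint32_t n)
{
    uint32_t i;
    for(i = 0; i < (n / 2); i++)
    {
        const uint8_t tmp = in[i];
        res[i] = in[n - 1 - i];
        res[n - 1 - i] = tmp;
    }
    if(n % 2) // odd length: the middle byte stays in place
    {
        res[i] = in[i];
    }
}

int main(void)
{
    uint8_t a[5] = {1, 2, 3, 4, 5}, b[5], c[5];
    reverse_endian_demo(b, a, 5);
    reverse_endian_demo(c, b, 5);
    printf("round-trip %s\n", (memcmp(a, c, 5) == 0) ? "ok" : "FAILED");
    return 0;
}
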
+
+_INLINE_ ret_t
+ossl_bn2bin(OUT uint8_t *out, IN const BIGNUM *in, IN const uint32_t size)
+{
+ assert(size <= N_SIZE);
+ uint8_t be_tmp[N_SIZE] = {0};
+
+ memset(out, 0, size);
+
+ if(BN_bn2bin(in, be_tmp) == -1)
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+ reverse_endian(out, be_tmp, BN_num_bytes(in));
+
+ return SUCCESS;
+}
+
+_INLINE_ ret_t
+ossl_bin2bn(OUT BIGNUM *out, IN const uint8_t *in, IN const uint32_t size)
+{
+ assert(size <= N_SIZE);
+ uint8_t be_tmp[N_SIZE] = {0};
+
+ reverse_endian(be_tmp, in, size);
+
+ if(BN_bin2bn(be_tmp, size, out) == 0)
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ return SUCCESS;
+}
+
+ret_t
+ossl_add(OUT uint8_t res_bin[R_SIZE],
+ IN const uint8_t a_bin[R_SIZE],
+ IN const uint8_t b_bin[R_SIZE])
+{
+ DEFER_CLEANUP(BN_CTX *bn_ctx = BN_CTX_new(), BN_CTX_cleanup_pointer);
+ BIGNUM *r = NULL;
+ BIGNUM *a = NULL;
+ BIGNUM *b = NULL;
+
+ if(NULL == bn_ctx)
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ BN_CTX_start(bn_ctx);
+
+ r = BN_CTX_get(bn_ctx);
+ a = BN_CTX_get(bn_ctx);
+ b = BN_CTX_get(bn_ctx);
+
+ if((NULL == r) || (NULL == a) || (NULL == b))
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
+ GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
+
+ if(BN_GF2m_add(r, a, b) == 0)
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
+
+ return SUCCESS;
+}
+
+// Perform a cyclic product by using OpenSSL.
+_INLINE_ ret_t
+ossl_cyclic_product(OUT BIGNUM *r,
+ IN const BIGNUM *a,
+ IN const BIGNUM *b,
+ BN_CTX * bn_ctx)
+{
+ BIGNUM *m = BN_CTX_get(bn_ctx);
+ if(NULL == m)
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ // m = x^PARAM_R - 1
+ if((BN_set_bit(m, R_BITS) == 0) || (BN_set_bit(m, 0) == 0))
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ // r = a*b mod m
+ if(BN_GF2m_mod_mul(r, a, b, m, bn_ctx) == 0)
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ return SUCCESS;
+}
+
+// Perform a cyclic product by using OpenSSL.
+ret_t
+cyclic_product(OUT uint8_t res_bin[R_SIZE],
+ IN const uint8_t a_bin[R_SIZE],
+ IN const uint8_t b_bin[R_SIZE])
+{
+ DEFER_CLEANUP(BN_CTX *bn_ctx = BN_CTX_new(), BN_CTX_cleanup_pointer);
+ BIGNUM *r = NULL;
+ BIGNUM *a = NULL;
+ BIGNUM *b = NULL;
+
+ if(NULL == bn_ctx)
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ BN_CTX_start(bn_ctx);
+
+ r = BN_CTX_get(bn_ctx);
+ a = BN_CTX_get(bn_ctx);
+ b = BN_CTX_get(bn_ctx);
+
+ if((NULL == r) || (NULL == a) || (NULL == b))
+ {
+ BIKE_ERROR(EXTERNAL_LIB_ERROR_OPENSSL);
+ }
+
+ GUARD(ossl_bin2bn(a, a_bin, R_SIZE));
+ GUARD(ossl_bin2bn(b, b_bin, R_SIZE));
+ GUARD(ossl_cyclic_product(r, a, b, bn_ctx));
+ GUARD(ossl_bn2bin(res_bin, r, R_SIZE));
+
+ return SUCCESS;
+}
+
+#endif // USE_OPENSSL_GF2M
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.h
index 59438b6d70..4f1c55bd94 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/openssl_utils.h
@@ -1,33 +1,33 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron,
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "types.h"
-
-#ifdef USE_OPENSSL
-# include <openssl/bn.h>
-# ifndef OPENSSL_NO_EC2M
-# define USE_OPENSSL_GF2M 1
-# endif
-#endif
-
-#ifdef USE_OPENSSL_GF2M
-
-ret_t
-ossl_add(OUT uint8_t res_bin[R_SIZE],
- IN const uint8_t a_bin[R_SIZE],
- IN const uint8_t b_bin[R_SIZE]);
-
-// Perform a cyclic product using OpenSSL
-ret_t
-cyclic_product(OUT uint8_t res_bin[R_SIZE],
- IN const uint8_t a_bin[R_SIZE],
- IN const uint8_t b_bin[R_SIZE]);
-
-#endif
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron,
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "types.h"
+
+#ifdef USE_OPENSSL
+# include <openssl/bn.h>
+# ifndef OPENSSL_NO_EC2M
+# define USE_OPENSSL_GF2M 1
+# endif
+#endif
+
+#ifdef USE_OPENSSL_GF2M
+
+ret_t
+ossl_add(OUT uint8_t res_bin[R_SIZE],
+ IN const uint8_t a_bin[R_SIZE],
+ IN const uint8_t b_bin[R_SIZE]);
+
+// Perform a cyclic product using OpenSSL
+ret_t
+cyclic_product(OUT uint8_t res_bin[R_SIZE],
+ IN const uint8_t a_bin[R_SIZE],
+ IN const uint8_t b_bin[R_SIZE]);
+
+#endif
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c
index 3686338fad..1efde4ddd1 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.c
@@ -1,118 +1,118 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "sampling.h"
-#include <assert.h>
-#include <string.h>
-
-_INLINE_ ret_t
-get_rand_mod_len(OUT uint32_t * rand_pos,
- IN const uint32_t len,
- IN OUT aes_ctr_prf_state_t *prf_state)
-{
- const uint64_t mask = MASK(bit_scan_reverse(len));
-
- do
- {
- // Generate a random 32-bit number
- GUARD(aes_ctr_prf((uint8_t *)rand_pos, prf_state, sizeof(*rand_pos)));
-
- // Mask only relevant bits
- (*rand_pos) &= mask;
-
- // Break if a number smaller than len is found
- if((*rand_pos) < len)
- {
- break;
- }
-
- } while(1);
-
- return SUCCESS;
-}
-
-_INLINE_ void
-make_odd_weight(IN OUT r_t *r)
-{
- if(((r_bits_vector_weight(r) % 2) == 1))
- {
- // Already odd
- return;
- }
-
- r->raw[0] ^= 1;
-}
-
-// must_be_odd: MUST_BE_ODD (1) forces an odd weight; NO_RESTRICTION (0) does not
-ret_t
-sample_uniform_r_bits_with_fixed_prf_context(OUT r_t *r,
- IN OUT
- aes_ctr_prf_state_t *prf_state,
- IN const must_be_odd_t must_be_odd)
-{
- // Generate random data
- GUARD(aes_ctr_prf(r->raw, prf_state, R_SIZE));
-
- // Mask upper bits of the MSByte
- r->raw[R_SIZE - 1] &= MASK(R_BITS + 8 - (R_SIZE * 8));
-
- if(must_be_odd == MUST_BE_ODD)
- {
- make_odd_weight(r);
- }
-
- return SUCCESS;
-}
-
-_INLINE_ int
-is_new(IN const idx_t wlist[], IN const uint32_t ctr)
-{
- for(uint32_t i = 0; i < ctr; i++)
- {
- if(wlist[i] == wlist[ctr])
- {
- return 0;
- }
- }
-
- return 1;
-}
-
-// Assumption 1) padded_len % 64 == 0
-// Assumption 2) a is an array of len bits, padded to padded_len bytes. The
-// padded area may be modified and should be ignored outside
-// the function scope.
-ret_t
-generate_sparse_rep(OUT uint64_t * a,
- OUT idx_t wlist[],
- IN const uint32_t weight,
- IN const uint32_t len,
- IN const uint32_t padded_len,
- IN OUT aes_ctr_prf_state_t *prf_state)
-{
- assert(padded_len % 64 == 0);
- // Compare in bits (padded_len is in bytes)
- assert((padded_len * 8) >= len);
-
- uint64_t ctr = 0;
-
- // Generate 'weight' distinct random positions
- do
- {
- GUARD(get_rand_mod_len(&wlist[ctr], len, prf_state));
- ctr += is_new(wlist, ctr);
- } while(ctr < weight);
-
- // Initialize to zero
- memset(a, 0, (len + 7) >> 3);
-
- // Assign values to "a"
- secure_set_bits(a, wlist, padded_len, weight);
-
- return SUCCESS;
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "sampling.h"
+#include <assert.h>
+#include <string.h>
+
+_INLINE_ ret_t
+get_rand_mod_len(OUT uint32_t * rand_pos,
+ IN const uint32_t len,
+ IN OUT aes_ctr_prf_state_t *prf_state)
+{
+ const uint64_t mask = MASK(bit_scan_reverse(len));
+
+ do
+ {
+ // Generate a random 32-bit number
+ GUARD(aes_ctr_prf((uint8_t *)rand_pos, prf_state, sizeof(*rand_pos)));
+
+ // Mask only relevant bits
+ (*rand_pos) &= mask;
+
+ // Break if a number smaller than len is found
+ if((*rand_pos) < len)
+ {
+ break;
+ }
+
+ } while(1);
+
+ return SUCCESS;
+}
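+
+// Rejection-sampling note (illustrative): MASK(bit_scan_reverse(len)) keeps
+// only the bit_length(len) low bits, so each candidate is accepted with
+// probability len / 2^bit_length(len) >= 1/2, and the expected number of PRF
+// calls is at most two. For example, len = 10 has bit length 4, giving the
+// mask 0xF and acceptance probability 10/16.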
+
+_INLINE_ void
+make_odd_weight(IN OUT r_t *r)
+{
+ if(((r_bits_vector_weight(r) % 2) == 1))
+ {
+ // Already odd
+ return;
+ }
+
+ r->raw[0] ^= 1;
+}
+
+// must_be_odd: MUST_BE_ODD (1) forces an odd weight; NO_RESTRICTION (0) does not
+ret_t
+sample_uniform_r_bits_with_fixed_prf_context(OUT r_t *r,
+ IN OUT
+ aes_ctr_prf_state_t *prf_state,
+ IN const must_be_odd_t must_be_odd)
+{
+ // Generate random data
+ GUARD(aes_ctr_prf(r->raw, prf_state, R_SIZE));
+
+ // Mask upper bits of the MSByte
+ r->raw[R_SIZE - 1] &= MASK(R_BITS + 8 - (R_SIZE * 8));
+
+ if(must_be_odd == MUST_BE_ODD)
+ {
+ make_odd_weight(r);
+ }
+
+ return SUCCESS;
+}
+
+_INLINE_ int
+is_new(IN const idx_t wlist[], IN const uint32_t ctr)
+{
+ for(uint32_t i = 0; i < ctr; i++)
+ {
+ if(wlist[i] == wlist[ctr])
+ {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+// Assumption 1) padded_len % 64 == 0
+// Assumption 2) a is an array of len bits, padded to padded_len bytes. The
+// padded area may be modified and should be ignored outside
+// the function scope.
+ret_t
+generate_sparse_rep(OUT uint64_t * a,
+ OUT idx_t wlist[],
+ IN const uint32_t weight,
+ IN const uint32_t len,
+ IN const uint32_t padded_len,
+ IN OUT aes_ctr_prf_state_t *prf_state)
+{
+ assert(padded_len % 64 == 0);
+ // Compare in bits (padded_len is in bytes)
+ assert((padded_len * 8) >= len);
+
+ uint64_t ctr = 0;
+
+ // Generate 'weight' distinct random positions
+ do
+ {
+ GUARD(get_rand_mod_len(&wlist[ctr], len, prf_state));
+ ctr += is_new(wlist, ctr);
+ } while(ctr < weight);
+
+ // Initialize to zero
+ memset(a, 0, (len + 7) >> 3);
+
+ // Assign values to "a"
+ secure_set_bits(a, wlist, padded_len, weight);
+
+ return SUCCESS;
+}
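+
+// Usage sketch (illustrative; the exact buffer sizes used by the callers may
+// differ): draw a weight-DV sparse vector of R_BITS bits into a zero-padded
+// quadword buffer, assuming R_PADDED_SIZE is a multiple of 64.
+//
+// uint64_t a[R_PADDED_SIZE / 8] = {0};
+// idx_t wlist[DV];
+// GUARD(generate_sparse_rep(a, wlist, DV, R_BITS, R_PADDED_SIZE, prf_state));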
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h
index 1ffd56f34a..8d6caa6d7c 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling.h
@@ -1,78 +1,78 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "aes_ctr_prf.h"
-#include "pq-crypto/s2n_pq_random.h"
-#include "utils/s2n_result.h"
-#include "utilities.h"
-
-typedef enum
-{
- NO_RESTRICTION = 0,
- MUST_BE_ODD = 1
-} must_be_odd_t;
-
-_INLINE_ ret_t
-get_seeds(OUT seeds_t *seeds)
-{
- if(s2n_result_is_ok(s2n_get_random_bytes(seeds->seed[0].raw, sizeof(seeds_t))))
- {
- return SUCCESS;
- }
- else
- {
- BIKE_ERROR(E_FAIL_TO_GET_SEED);
- }
-}
-
-// Returns an array of r pseudorandom bits.
-// No restrictions exist for the top or bottom bits;
-// if an odd weight is required, set must_be_odd = MUST_BE_ODD.
-// Uses the provided PRF context.
-ret_t
-sample_uniform_r_bits_with_fixed_prf_context(OUT r_t *r,
- IN OUT
- aes_ctr_prf_state_t *prf_state,
- IN must_be_odd_t must_be_odd);
-
-// Returns an array of r pseudorandom bits.
-// No restrictions exist for the top or bottom bits;
-// if an odd weight is required, set must_be_odd = MUST_BE_ODD.
-_INLINE_ ret_t
-sample_uniform_r_bits(OUT r_t *r,
- IN const seed_t * seed,
- IN const must_be_odd_t must_be_odd)
-{
- // For the seed expander
- DEFER_CLEANUP(aes_ctr_prf_state_t prf_state = {0}, aes_ctr_prf_state_cleanup);
-
- GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, seed));
-
- GUARD(sample_uniform_r_bits_with_fixed_prf_context(r, &prf_state, must_be_odd));
-
- return SUCCESS;
-}
-
-// Generate a pseudorandom r of len bits with the given weight,
-// using the supplied PRF context.
-// Also outputs a compressed (unordered) list of the set indices.
-ret_t
-generate_sparse_rep(OUT uint64_t *a,
- OUT idx_t wlist[],
- IN uint32_t weight,
- IN uint32_t len,
- IN uint32_t padded_len,
- IN OUT aes_ctr_prf_state_t *prf_state);
-
-EXTERNC void
-secure_set_bits(IN OUT uint64_t *a,
- IN const idx_t wlist[],
- IN uint32_t a_len,
- IN uint32_t weight);
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "aes_ctr_prf.h"
+#include "pq-crypto/s2n_pq_random.h"
+#include "utils/s2n_result.h"
+#include "utilities.h"
+
+typedef enum
+{
+ NO_RESTRICTION = 0,
+ MUST_BE_ODD = 1
+} must_be_odd_t;
+
+_INLINE_ ret_t
+get_seeds(OUT seeds_t *seeds)
+{
+ if(s2n_result_is_ok(s2n_get_random_bytes(seeds->seed[0].raw, sizeof(seeds_t))))
+ {
+ return SUCCESS;
+ }
+ else
+ {
+ BIKE_ERROR(E_FAIL_TO_GET_SEED);
+ }
+}
+
+// Returns an array of r pseudorandom bits.
+// No restrictions exist for the top or bottom bits;
+// if an odd weight is required, set must_be_odd = MUST_BE_ODD.
+// Uses the provided PRF context.
+ret_t
+sample_uniform_r_bits_with_fixed_prf_context(OUT r_t *r,
+ IN OUT
+ aes_ctr_prf_state_t *prf_state,
+ IN must_be_odd_t must_be_odd);
+
+// Returns an array of r pseudorandom bits.
+// No restrictions exist for the top or bottom bits;
+// if an odd weight is required, set must_be_odd = MUST_BE_ODD.
+_INLINE_ ret_t
+sample_uniform_r_bits(OUT r_t *r,
+ IN const seed_t * seed,
+ IN const must_be_odd_t must_be_odd)
+{
+ // For the seed expander
+ DEFER_CLEANUP(aes_ctr_prf_state_t prf_state = {0}, aes_ctr_prf_state_cleanup);
+
+ GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, seed));
+
+ GUARD(sample_uniform_r_bits_with_fixed_prf_context(r, &prf_state, must_be_odd));
+
+ return SUCCESS;
+}
+
+// Generate a pseudorandom r of len bits with the given weight,
+// using the supplied PRF context.
+// Also outputs a compressed (unordered) list of the set indices.
+ret_t
+generate_sparse_rep(OUT uint64_t *a,
+ OUT idx_t wlist[],
+ IN uint32_t weight,
+ IN uint32_t len,
+ IN uint32_t padded_len,
+ IN OUT aes_ctr_prf_state_t *prf_state);
+
+EXTERNC void
+secure_set_bits(IN OUT uint64_t *a,
+ IN const idx_t wlist[],
+ IN uint32_t a_len,
+ IN uint32_t weight);
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling_portable.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling_portable.c
index 1ae7a6f247..e41e6b5cf2 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling_portable.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sampling_portable.c
@@ -1,48 +1,48 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "sampling.h"
-#include <assert.h>
-
-#define MAX_WEIGHT (T1 > DV ? T1 : DV)
-
-// This implementation assumes that wlist may contain fake (padding) indices
-void
-secure_set_bits(IN OUT uint64_t * a,
- IN const idx_t wlist[],
- IN const uint32_t a_len_bytes,
- IN const uint32_t weight)
-{
- assert(a_len_bytes % 8 == 0);
-
- // Size the arrays to the maximum possible weight, for the stack protector
- assert(weight <= MAX_WEIGHT);
- uint64_t qw_pos[MAX_WEIGHT];
- uint64_t bit_pos[MAX_WEIGHT];
-
- // 1. Identify the QW position of each value and the bit position inside this
- // QW.
- for(uint32_t j = 0; j < weight; j++)
- {
- qw_pos[j] = wlist[j] >> 6;
- bit_pos[j] = BIT(wlist[j] & 0x3f);
- }
-
- // 2. Fill each QW in constant time.
- for(uint32_t qw = 0; qw < (a_len_bytes / 8); qw++)
- {
- uint64_t tmp = 0;
- for(uint32_t j = 0; j < weight; j++)
- {
- uint64_t mask = (-1ULL) + (!secure_cmp32(qw_pos[j], qw));
- tmp |= (bit_pos[j] & mask);
- }
- // Set the bit in a masked way
- a[qw] |= tmp;
- }
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "sampling.h"
+#include <assert.h>
+
+#define MAX_WEIGHT (T1 > DV ? T1 : DV)
+
+// This implementation assumes that wlist may contain fake (padding) indices
+void
+secure_set_bits(IN OUT uint64_t * a,
+ IN const idx_t wlist[],
+ IN const uint32_t a_len_bytes,
+ IN const uint32_t weight)
+{
+ assert(a_len_bytes % 8 == 0);
+
+ // Size the arrays to the maximum possible weight, for the stack protector
+ assert(weight <= MAX_WEIGHT);
+ uint64_t qw_pos[MAX_WEIGHT];
+ uint64_t bit_pos[MAX_WEIGHT];
+
+ // 1. Identify the QW position of each value and the bit position inside this
+ // QW.
+ for(uint32_t j = 0; j < weight; j++)
+ {
+ qw_pos[j] = wlist[j] >> 6;
+ bit_pos[j] = BIT(wlist[j] & 0x3f);
+ }
+
+ // 2. Fill each QW in constant time.
+ for(uint32_t qw = 0; qw < (a_len_bytes / 8); qw++)
+ {
+ uint64_t tmp = 0;
+ for(uint32_t j = 0; j < weight; j++)
+ {
+ uint64_t mask = (-1ULL) + (!secure_cmp32(qw_pos[j], qw));
+ tmp |= (bit_pos[j] & mask);
+ }
+ // Set the bit in a masked way
+ a[qw] |= tmp;
+ }
+}
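+
+// The masking trick above, in isolation (a minimal sketch;
+// ct_select_bit_sketch is illustrative and not part of this file):
+// secure_cmp32 returns 1 on equality, so (-1ULL) + (!eq) is all-ones exactly
+// when pos == qw, selecting the bit without a data-dependent branch.
+_INLINE_ uint64_t
+ct_select_bit_sketch(IN const uint32_t pos,
+ IN const uint32_t qw,
+ IN const uint64_t bit)
+{
+ const uint64_t mask = (-1ULL) + (!secure_cmp32(pos, qw));
+ return (bit & mask);
+}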
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/secure_decode_portable.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/secure_decode_portable.c
index 963c3257b7..dc4fbe01d8 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/secure_decode_portable.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/secure_decode_portable.c
@@ -1,66 +1,66 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "decode.h"
-#include "utilities.h"
-
-#define R_QW_HALF_LOG2 UPTOPOW2(R_QW / 2)
-
-_INLINE_ void
-rotr_big(OUT syndrome_t *out, IN const syndrome_t *in, IN size_t qw_num)
-{
- // Prevent overflows (the comparison is in bytes)
- bike_static_assert(sizeof(*out) > 8 * (R_QW + (2 * R_QW_HALF_LOG2)),
- rotr_big_err);
-
- memcpy(out, in, sizeof(*in));
-
- for(uint32_t idx = R_QW_HALF_LOG2; idx >= 1; idx >>= 1)
- {
- // Convert 32 bit mask to 64 bit mask
- const uint64_t mask = ((uint32_t)secure_l32_mask(qw_num, idx) + 1U) - 1ULL;
- qw_num = qw_num - (idx & mask);
-
- // Rotate R_QW quadwords and another idx quadwords needed by the next
- // iteration
- for(size_t i = 0; i < (R_QW + idx); i++)
- {
- out->qw[i] = (out->qw[i] & (~mask)) | (out->qw[i + idx] & mask);
- }
- }
-}
-
-_INLINE_ void
-rotr_small(OUT syndrome_t *out, IN const syndrome_t *in, IN const size_t bits)
-{
- bike_static_assert(bits < 64, rotr_small_err);
- bike_static_assert(sizeof(*out) > (8 * R_QW), rotr_small_qw_err);
-
- // Convert |bits| to 0/1 using !!bits, then create a mask of 0 or
- // 0xffffffffffffffff. high_shift avoids the undefined behaviour of x << 64.
- const uint64_t mask = (0 - (!!bits));
- const uint64_t high_shift = (64 - bits) & mask;
-
- for(size_t i = 0; i < R_QW; i++)
- {
- const uint64_t low_part = in->qw[i] >> bits;
- const uint64_t high_part = (in->qw[i + 1] << high_shift) & mask;
- out->qw[i] = low_part | high_part;
- }
-}
-
-void
-rotate_right(OUT syndrome_t *out,
- IN const syndrome_t *in,
- IN const uint32_t bitscount)
-{
- // Rotate (64-bit) quad-words
- rotr_big(out, in, (bitscount / 64));
- // Rotate bits (less than 64)
- rotr_small(out, out, (bitscount % 64));
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "decode.h"
+#include "utilities.h"
+
+#define R_QW_HALF_LOG2 UPTOPOW2(R_QW / 2)
+
+_INLINE_ void
+rotr_big(OUT syndrome_t *out, IN const syndrome_t *in, IN size_t qw_num)
+{
+ // Prevent overflows (the comparison is in bytes)
+ bike_static_assert(sizeof(*out) > 8 * (R_QW + (2 * R_QW_HALF_LOG2)),
+ rotr_big_err);
+
+ memcpy(out, in, sizeof(*in));
+
+ for(uint32_t idx = R_QW_HALF_LOG2; idx >= 1; idx >>= 1)
+ {
+ // Convert 32 bit mask to 64 bit mask
+ const uint64_t mask = ((uint32_t)secure_l32_mask(qw_num, idx) + 1U) - 1ULL;
+ qw_num = qw_num - (idx & mask);
+
+ // Rotate R_QW quadwords and another idx quadwords needed by the next
+ // iteration
+ for(size_t i = 0; i < (R_QW + idx); i++)
+ {
+ out->qw[i] = (out->qw[i] & (~mask)) | (out->qw[i + idx] & mask);
+ }
+ }
+}
+
+_INLINE_ void
+rotr_small(OUT syndrome_t *out, IN const syndrome_t *in, IN const size_t bits)
+{
+ bike_static_assert(bits < 64, rotr_small_err);
+ bike_static_assert(sizeof(*out) > (8 * R_QW), rotr_small_qw_err);
+
+ // Convert |bits| to 0/1 using !!bits, then create a mask of 0 or
+ // 0xffffffffffffffff. high_shift avoids the undefined behaviour of x << 64.
+ const uint64_t mask = (0 - (!!bits));
+ const uint64_t high_shift = (64 - bits) & mask;
+
+ for(size_t i = 0; i < R_QW; i++)
+ {
+ const uint64_t low_part = in->qw[i] >> bits;
+ const uint64_t high_part = (in->qw[i + 1] << high_shift) & mask;
+ out->qw[i] = low_part | high_part;
+ }
+}
+
+void
+rotate_right(OUT syndrome_t *out,
+ IN const syndrome_t *in,
+ IN const uint32_t bitscount)
+{
+ // Rotate (64-bit) quad-words
+ rotr_big(out, in, (bitscount / 64));
+ // Rotate bits (less than 64)
+ rotr_small(out, out, (bitscount % 64));
+}
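+
+// Worked example (illustrative): for bitscount = 200, rotate_right calls
+// rotr_big with 200 / 64 = 3 to rotate whole quadwords, then rotr_small with
+// 200 % 64 = 8 for the remaining bits. rotr_big walks idx = R_QW_HALF_LOG2,
+// R_QW_HALF_LOG2 / 2, ..., 1 and applies each power-of-two shift under a
+// constant-time mask, so the memory access pattern does not depend on the
+// secret rotation amount.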
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sha.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sha.h
index 63687055f2..f323cd6b67 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sha.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/sha.h
@@ -1,41 +1,41 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "cleanup.h"
-#include "types.h"
-#include "utilities.h"
-#include <openssl/sha.h>
-
-#define SHA384_HASH_SIZE 48ULL
-#define SHA384_HASH_QWORDS (SHA384_HASH_SIZE / 8)
-
-typedef struct sha384_hash_s
-{
- union {
- uint8_t raw[SHA384_HASH_SIZE];
- uint64_t qw[SHA384_HASH_QWORDS];
- } u;
-} sha384_hash_t;
-bike_static_assert(sizeof(sha384_hash_t) == SHA384_HASH_SIZE, sha384_hash_size);
-
-typedef sha384_hash_t sha_hash_t;
-
-_INLINE_ void
-sha_hash_cleanup(IN OUT sha_hash_t *o)
-{
- secure_clean(o->u.raw, sizeof(*o));
-}
-
-_INLINE_ int
-sha(OUT sha_hash_t *hash_out, IN const uint32_t byte_len, IN const uint8_t *msg)
-{
- SHA384(msg, byte_len, hash_out->u.raw);
- return 1;
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "cleanup.h"
+#include "types.h"
+#include "utilities.h"
+#include <openssl/sha.h>
+
+#define SHA384_HASH_SIZE 48ULL
+#define SHA384_HASH_QWORDS (SHA384_HASH_SIZE / 8)
+
+typedef struct sha384_hash_s
+{
+ union {
+ uint8_t raw[SHA384_HASH_SIZE];
+ uint64_t qw[SHA384_HASH_QWORDS];
+ } u;
+} sha384_hash_t;
+bike_static_assert(sizeof(sha384_hash_t) == SHA384_HASH_SIZE, sha384_hash_size);
+
+typedef sha384_hash_t sha_hash_t;
+
+_INLINE_ void
+sha_hash_cleanup(IN OUT sha_hash_t *o)
+{
+ secure_clean(o->u.raw, sizeof(*o));
+}
+
+_INLINE_ int
+sha(OUT sha_hash_t *hash_out, IN const uint32_t byte_len, IN const uint8_t *msg)
+{
+ SHA384(msg, byte_len, hash_out->u.raw);
+ return 1;
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/types.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/types.h
index 044b7ee38e..647efdf811 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/types.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/types.h
@@ -1,139 +1,139 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "bike_defs.h"
-#include "error.h"
-#include <stdint.h>
-
-typedef struct uint128_s
-{
- union {
- uint8_t bytes[16];
- uint32_t dw[4];
- uint64_t qw[2];
- } u;
-} uint128_t;
-
-// Make sure the compiler adds no padding between the members.
-#pragma pack(push, 1)
-
-typedef struct seed_s
-{
- uint8_t raw[32];
-} seed_t;
-
-typedef struct seeds_s
-{
- seed_t seed[NUM_OF_SEEDS];
-} seeds_t;
-
-typedef struct r_s
-{
- uint8_t raw[R_SIZE];
-} r_t;
-
-typedef struct e_s
-{
- uint8_t raw[N_SIZE];
-} e_t;
-
-typedef struct generic_param_n_s
-{
- r_t val[N0];
-} generic_param_n_t;
-
-typedef generic_param_n_t ct_t;
-typedef generic_param_n_t pk_t;
-typedef generic_param_n_t split_e_t;
-
-typedef uint32_t idx_t;
-
-typedef struct compressed_idx_dv_s
-{
- idx_t val[DV];
-} compressed_idx_dv_t;
-
-typedef compressed_idx_dv_t compressed_idx_dv_ar_t[N0];
-
-typedef struct compressed_idx_t_t
-{
- idx_t val[T1];
-} compressed_idx_t_t;
-
-// The secret key holds both representations, to avoid
-// the compression step in the decaps stage
-typedef struct sk_s
-{
- compressed_idx_dv_ar_t wlist;
- r_t bin[N0];
-#ifndef INDCPA
- r_t sigma0;
- r_t sigma1;
-#endif
-} sk_t;
-
-// Pad e to the next Block
-typedef ALIGN(8) struct padded_e_s
-{
- e_t val;
- uint8_t pad[N_PADDED_SIZE - N_SIZE];
-} padded_e_t;
-
-// Pad r to the next Block
-typedef ALIGN(8) struct padded_r_s
-{
- r_t val;
- uint8_t pad[R_PADDED_SIZE - R_SIZE];
-} padded_r_t;
-
-typedef padded_r_t padded_param_n_t[N0];
-typedef padded_param_n_t pad_sk_t;
-typedef padded_param_n_t pad_pk_t;
-typedef padded_param_n_t pad_ct_t;
-
-// Need to allocate twice the room for the results
-typedef ALIGN(8) struct dbl_padded_r_s
-{
- r_t val;
- uint8_t pad[(2 * R_PADDED_SIZE) - R_SIZE];
-} dbl_padded_r_t;
-
-typedef dbl_padded_r_t dbl_padded_param_n_t[N0];
-typedef dbl_padded_param_n_t dbl_pad_pk_t;
-typedef dbl_padded_param_n_t dbl_pad_ct_t;
-typedef dbl_padded_param_n_t dbl_pad_syndrome_t;
-
-typedef struct ss_s
-{
- uint8_t raw[ELL_K_SIZE];
-} ss_t;
-
-// For optimization purposes:
-// 1) For a faster rotate we duplicate the syndrome (dup1/dup2)
-// 2) We extend it to fit a DDQW boundary
-typedef ALIGN(64) struct syndrome_s
-{
- uint64_t qw[3 * R_QW];
-} syndrome_t;
-
-typedef struct upc_slice_s
-{
- union {
- padded_r_t r;
- uint64_t qw[sizeof(padded_r_t) / 8];
- } u;
-} upc_slice_t;
-
-typedef struct upc_s
-{
- upc_slice_t slice[SLICES];
-} upc_t;
-
-#pragma pack(pop)
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "bike_defs.h"
+#include "error.h"
+#include <stdint.h>
+
+typedef struct uint128_s
+{
+ union {
+ uint8_t bytes[16];
+ uint32_t dw[4];
+ uint64_t qw[2];
+ } u;
+} uint128_t;
+
+// Make sure the compiler adds no padding between the members.
+#pragma pack(push, 1)
+
+typedef struct seed_s
+{
+ uint8_t raw[32];
+} seed_t;
+
+typedef struct seeds_s
+{
+ seed_t seed[NUM_OF_SEEDS];
+} seeds_t;
+
+typedef struct r_s
+{
+ uint8_t raw[R_SIZE];
+} r_t;
+
+typedef struct e_s
+{
+ uint8_t raw[N_SIZE];
+} e_t;
+
+typedef struct generic_param_n_s
+{
+ r_t val[N0];
+} generic_param_n_t;
+
+typedef generic_param_n_t ct_t;
+typedef generic_param_n_t pk_t;
+typedef generic_param_n_t split_e_t;
+
+typedef uint32_t idx_t;
+
+typedef struct compressed_idx_dv_s
+{
+ idx_t val[DV];
+} compressed_idx_dv_t;
+
+typedef compressed_idx_dv_t compressed_idx_dv_ar_t[N0];
+
+typedef struct compressed_idx_t_t
+{
+ idx_t val[T1];
+} compressed_idx_t_t;
+
+// The secret key holds both representations, to avoid
+// the compression step in the decaps stage
+typedef struct sk_s
+{
+ compressed_idx_dv_ar_t wlist;
+ r_t bin[N0];
+#ifndef INDCPA
+ r_t sigma0;
+ r_t sigma1;
+#endif
+} sk_t;
+
+// Pad e to the next Block
+typedef ALIGN(8) struct padded_e_s
+{
+ e_t val;
+ uint8_t pad[N_PADDED_SIZE - N_SIZE];
+} padded_e_t;
+
+// Pad r to the next Block
+typedef ALIGN(8) struct padded_r_s
+{
+ r_t val;
+ uint8_t pad[R_PADDED_SIZE - R_SIZE];
+} padded_r_t;
+
+typedef padded_r_t padded_param_n_t[N0];
+typedef padded_param_n_t pad_sk_t;
+typedef padded_param_n_t pad_pk_t;
+typedef padded_param_n_t pad_ct_t;
+
+// Need to allocate twice the room for the results
+typedef ALIGN(8) struct dbl_padded_r_s
+{
+ r_t val;
+ uint8_t pad[(2 * R_PADDED_SIZE) - R_SIZE];
+} dbl_padded_r_t;
+
+typedef dbl_padded_r_t dbl_padded_param_n_t[N0];
+typedef dbl_padded_param_n_t dbl_pad_pk_t;
+typedef dbl_padded_param_n_t dbl_pad_ct_t;
+typedef dbl_padded_param_n_t dbl_pad_syndrome_t;
+
+typedef struct ss_s
+{
+ uint8_t raw[ELL_K_SIZE];
+} ss_t;
+
+// For optimization purposes:
+// 1) For a faster rotate we duplicate the syndrome (dup1/dup2)
+// 2) We extend it to fit a DDQW boundary
+typedef ALIGN(64) struct syndrome_s
+{
+ uint64_t qw[3 * R_QW];
+} syndrome_t;
+
+typedef struct upc_slice_s
+{
+ union {
+ padded_r_t r;
+ uint64_t qw[sizeof(padded_r_t) / 8];
+ } u;
+} upc_slice_t;
+
+typedef struct upc_s
+{
+ upc_slice_t slice[SLICES];
+} upc_t;
+
+#pragma pack(pop)
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.c b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.c
index 4f049af86a..baed622b78 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.c
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.c
@@ -1,160 +1,160 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#include "utilities.h"
-#include <inttypes.h>
-
-#define BITS_IN_QW 64ULL
-#define BITS_IN_BYTE 8ULL
-
-// Print a newline only after every fourth quadword
-_INLINE_ void
-print_newline(IN const uint64_t qw_pos)
-{
-#ifndef NO_NEWLINE
- if((qw_pos % 4) == 3)
- {
- printf("\n ");
- }
-#endif
-}
-
-// This function is specialized ("stitched") for an R_BITS-bit vector
-uint64_t
-r_bits_vector_weight(IN const r_t *in)
-{
- uint64_t acc = 0;
- for(size_t i = 0; i < (R_SIZE - 1); i++)
- {
- acc += __builtin_popcount(in->raw[i]);
- }
-
- acc += __builtin_popcount(in->raw[R_SIZE - 1] & LAST_R_BYTE_MASK);
- return acc;
-}
-
-// Prints a QW in LE or BE byte order
-_INLINE_ void
-print_uint64(IN const uint64_t val)
-{
-// If printing in BE is required swap the order of bytes
-#ifdef PRINT_IN_BE
- uint64_t tmp = bswap_64(val);
-#else
- uint64_t tmp = val;
-#endif
-
- printf("%.16" PRIx64, tmp);
-
-#ifndef NO_SPACE
- printf(" ");
-#endif
-}
-
-// The last block requires special handling: all bits above the desired bit
-// count must be masked to zero. endien: 0 - BE, 1 - LE.
-// Returns 1 if the last block was printed, else 0.
-_INLINE_ uint8_t
-print_last_block(IN const uint8_t *last_bytes,
- IN const uint32_t bits_num,
- IN const uint32_t endien)
-{
- // Floor of bits/64; the remainder is in the next QW
- const uint32_t qw_num = bits_num / BITS_IN_QW;
-
- // How many bits to pad with zero
- const uint32_t rem_bits = bits_num - (BITS_IN_QW * qw_num);
-
- // We read byte by byte rather than a whole QW, to avoid an out-of-bounds read
- const uint32_t bytes_num = ((rem_bits % 8) == 0) ? rem_bits / BITS_IN_BYTE
- : 1 + rem_bits / BITS_IN_BYTE;
-
- // Must be signed for the LE loop
- int i;
-
- if(0 == rem_bits)
- {
- return 0;
- }
-
- // Mask unneeded bits
- const uint8_t last_byte = (rem_bits % 8 == 0)
- ? last_bytes[bytes_num - 1]
- : last_bytes[bytes_num - 1] & MASK(rem_bits % 8);
- // BE
- if(0 == endien)
- {
- for(i = 0; (uint32_t)i < (bytes_num - 1); i++)
- {
- printf("%.2x", last_bytes[i]);
- }
-
- printf("%.2x", last_byte);
-
- for(i++; (uint32_t)i < sizeof(uint64_t); i++)
- {
- printf("__");
- }
- }
- else
- {
- for(i = sizeof(uint64_t) - 1; (uint32_t)i >= bytes_num; i--)
- {
- printf("__");
- }
-
- printf("%.2x", last_byte);
-
- for(i--; i >= 0; i--)
- {
- printf("%.2x", last_bytes[i]);
- }
- }
-
-#ifndef NO_SPACE
- printf(" ");
-#endif
-
- return 1;
-}
-
-void
-print_LE(IN const uint64_t *in, IN const uint32_t bits_num)
-{
- const uint32_t qw_num = bits_num / BITS_IN_QW;
-
- // Print the MSB QW
- uint32_t qw_pos = print_last_block((const uint8_t *)&in[qw_num], bits_num, 1);
-
- // Print each 8 bytes separated by space (if required)
- for(int i = ((int)qw_num) - 1; i >= 0; i--, qw_pos++)
- {
- print_uint64(in[i]);
- print_newline(qw_pos);
- }
-
- printf("\n");
-}
-
-void
-print_BE(IN const uint64_t *in, IN const uint32_t bits_num)
-{
- const uint32_t qw_num = bits_num / BITS_IN_QW;
-
- // Print each quadword (16 hex digits) separately
- for(uint32_t i = 0; i < qw_num; ++i)
- {
- print_uint64(in[i]);
- print_newline(i);
- }
-
- // Print the MSB QW
- print_last_block((const uint8_t *)&in[qw_num], bits_num, 0);
-
- printf("\n");
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#include "utilities.h"
+#include <inttypes.h>
+
+#define BITS_IN_QW 64ULL
+#define BITS_IN_BYTE 8ULL
+
+// Print a newline only after every fourth quadword
+_INLINE_ void
+print_newline(IN const uint64_t qw_pos)
+{
+#ifndef NO_NEWLINE
+ if((qw_pos % 4) == 3)
+ {
+ printf("\n ");
+ }
+#endif
+}
+
+// This function is specialized ("stitched") for an R_BITS-bit vector
+uint64_t
+r_bits_vector_weight(IN const r_t *in)
+{
+ uint64_t acc = 0;
+ for(size_t i = 0; i < (R_SIZE - 1); i++)
+ {
+ acc += __builtin_popcount(in->raw[i]);
+ }
+
+ acc += __builtin_popcount(in->raw[R_SIZE - 1] & LAST_R_BYTE_MASK);
+ return acc;
+}
+
+// Prints a QW in LE or BE byte order
+_INLINE_ void
+print_uint64(IN const uint64_t val)
+{
+// If printing in BE is required swap the order of bytes
+#ifdef PRINT_IN_BE
+ uint64_t tmp = bswap_64(val);
+#else
+ uint64_t tmp = val;
+#endif
+
+ printf("%.16" PRIx64, tmp);
+
+#ifndef NO_SPACE
+ printf(" ");
+#endif
+}
+
+// The last block requires special handling: all bits above the desired bit
+// count must be masked to zero. endien: 0 - BE, 1 - LE.
+// Returns 1 if the last block was printed, else 0.
+_INLINE_ uint8_t
+print_last_block(IN const uint8_t *last_bytes,
+ IN const uint32_t bits_num,
+ IN const uint32_t endien)
+{
+ // Floor of bits/64; the remainder is in the next QW
+ const uint32_t qw_num = bits_num / BITS_IN_QW;
+
+ // How many bits to pad with zero
+ const uint32_t rem_bits = bits_num - (BITS_IN_QW * qw_num);
+
+ // We read byte by byte rather than a whole QW, to avoid an out-of-bounds read
+ const uint32_t bytes_num = ((rem_bits % 8) == 0) ? rem_bits / BITS_IN_BYTE
+ : 1 + rem_bits / BITS_IN_BYTE;
+
+ // Must be signed for the LE loop
+ int i;
+
+ if(0 == rem_bits)
+ {
+ return 0;
+ }
+
+ // Mask unneeded bits
+ const uint8_t last_byte = (rem_bits % 8 == 0)
+ ? last_bytes[bytes_num - 1]
+ : last_bytes[bytes_num - 1] & MASK(rem_bits % 8);
+ // BE
+ if(0 == endien)
+ {
+ for(i = 0; (uint32_t)i < (bytes_num - 1); i++)
+ {
+ printf("%.2x", last_bytes[i]);
+ }
+
+ printf("%.2x", last_byte);
+
+ for(i++; (uint32_t)i < sizeof(uint64_t); i++)
+ {
+ printf("__");
+ }
+ }
+ else
+ {
+ for(i = sizeof(uint64_t) - 1; (uint32_t)i >= bytes_num; i--)
+ {
+ printf("__");
+ }
+
+ printf("%.2x", last_byte);
+
+ for(i--; i >= 0; i--)
+ {
+ printf("%.2x", last_bytes[i]);
+ }
+ }
+
+#ifndef NO_SPACE
+ printf(" ");
+#endif
+
+ return 1;
+}
+
+void
+print_LE(IN const uint64_t *in, IN const uint32_t bits_num)
+{
+ const uint32_t qw_num = bits_num / BITS_IN_QW;
+
+ // Print the MSB QW
+ uint32_t qw_pos = print_last_block((const uint8_t *)&in[qw_num], bits_num, 1);
+
+ // Print each 8 bytes separated by space (if required)
+ for(int i = ((int)qw_num) - 1; i >= 0; i--, qw_pos++)
+ {
+ print_uint64(in[i]);
+ print_newline(qw_pos);
+ }
+
+ printf("\n");
+}
+
+void
+print_BE(IN const uint64_t *in, IN const uint32_t bits_num)
+{
+ const uint32_t qw_num = bits_num / BITS_IN_QW;
+
+ // Print each quadword (16 hex digits) separately
+ for(uint32_t i = 0; i < qw_num; ++i)
+ {
+ print_uint64(in[i]);
+ print_newline(i);
+ }
+
+ // Print the MSB QW
+ print_last_block((const uint8_t *)&in[qw_num], bits_num, 0);
+
+ printf("\n");
+}
diff --git a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.h b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.h
index be8f4b9b10..bd2f163183 100644
--- a/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.h
+++ b/contrib/restricted/aws/s2n/pq-crypto/bike_r2/utilities.h
@@ -1,158 +1,158 @@
-/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Written by Nir Drucker and Shay Gueron
- * AWS Cryptographic Algorithms Group.
- * (ndrucker@amazon.com, gueron@amazon.com)
- */
-
-#pragma once
-
-#include "cleanup.h"
-
-#ifndef bswap_64
-# define bswap_64(x) __builtin_bswap64(x)
-#endif
-
-// Printing values in Little Endian
-void
-print_LE(IN const uint64_t *in, IN uint32_t bits_num);
-
-// Printing values in Big Endian
-void
-print_BE(IN const uint64_t *in, IN uint32_t bits_num);
-
-// Printing numbers is required only at verbose level 2 or above
-#if VERBOSE >= 2
-# ifdef PRINT_IN_BE
-// Print in Big Endian
-# define print(name, in, bits_num) \
- do \
- { \
- EDMSG(name); \
- print_BE(in, bits_num); \
- } while(0)
-# else
-// Print in Little Endian
-# define print(name, in, bits_num) \
- do \
- { \
- EDMSG(name); \
- print_LE(in, bits_num); \
- } while(0)
-# endif
-#else
-// No prints at all
-# define print(name, in, bits_num)
-#endif
-
-// Compare two byte arrays in constant time; returns 1 if equal, 0 otherwise
-_INLINE_ uint32_t
-secure_cmp(IN const uint8_t *a, IN const uint8_t *b, IN const uint32_t size)
-{
- volatile uint8_t res = 0;
-
- for(uint32_t i = 0; i < size; ++i)
- {
- res |= (a[i] ^ b[i]);
- }
-
- return (0 == res);
-}
-
-uint64_t
-r_bits_vector_weight(IN const r_t *in);
-
-// Constant time
-_INLINE_ uint32_t
-iszero(IN const uint8_t *s, IN const uint32_t len)
-{
- volatile uint32_t res = 0;
- for(uint64_t i = 0; i < len; i++)
- {
- res |= s[i];
- }
- return (0 == res);
-}
-
-// Returns the bit length of val: floor(log2(val)) + 1 for val > 0 (0 for val == 0)
-_INLINE_ uint8_t
-bit_scan_reverse(uint64_t val)
-{
- // index never exceeds 64
- uint8_t index = 0;
-
- while(val != 0)
- {
- val >>= 1;
- index++;
- }
-
- return index;
-}
-
-// Returns 1 if equal, 0 otherwise
-_INLINE_ uint32_t
-secure_cmp32(IN const uint32_t v1, IN const uint32_t v2)
-{
-#if defined(__aarch64__)
- uint32_t res;
- __asm__ __volatile__("cmp %w1, %w2; \n "
- "cset %w0, EQ; \n"
- : "=r"(res)
- : "r"(v1), "r"(v2)
- :);
- return res;
-#elif defined(__x86_64__) || defined(__i386__)
- uint32_t res;
- __asm__ __volatile__("xor %%edx, %%edx; \n"
- "cmp %1, %2; \n "
- "sete %%dl; \n"
- "mov %%edx, %0; \n"
- : "=r"(res)
- : "r"(v1), "r"(v2)
- : "rdx");
- return res;
-#else
- // Insecure comparison: The main purpose of secure_cmp32 is to avoid
- // branches and thus to prevent potential side channel attacks. To do that
- // we normally leverage some CPU special instructions such as "sete"
- // (for __x86_64__) and "cset" (for __aarch64__). When dealing with general
- // CPU architectures, the interpretation of the line below is left for the
- // compiler, which may lead to an insecure branch.
- return (v1 == v2 ? 1 : 0);
-#endif
-}
-
-// Return 0 if v1 < v2, (-1) otherwise
-_INLINE_ uint32_t
-secure_l32_mask(IN const uint32_t v1, IN const uint32_t v2)
-{
-#if defined(__aarch64__)
- uint32_t res;
- __asm__ __volatile__("cmp %w2, %w1; \n "
- "cset %w0, HI; \n"
- : "=r"(res)
- : "r"(v1), "r"(v2)
- :);
- return (res - 1);
-#elif defined(__x86_64__) || defined(__i386__)
- uint32_t res;
- __asm__ __volatile__("xor %%edx, %%edx; \n"
- "cmp %1, %2; \n "
- "setl %%dl; \n"
- "dec %%edx; \n"
- "mov %%edx, %0; \n"
-
- : "=r"(res)
- : "r"(v2), "r"(v1)
- : "rdx");
-
- return res;
-#else
- // If v1 >= v2 then the subtraction result is 0^32||(v1-v2),
- // otherwise it is 1^32||(2^32-(v2-v1)). Negating the upper
- // 32 bits therefore gives 0 if v1 < v2 and (-1) otherwise.
- return ~((uint32_t)(((uint64_t)v1 - (uint64_t)v2) >> 32));
-#endif
-}
+/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Written by Nir Drucker and Shay Gueron
+ * AWS Cryptographic Algorithms Group.
+ * (ndrucker@amazon.com, gueron@amazon.com)
+ */
+
+#pragma once
+
+#include "cleanup.h"
+
+#ifndef bswap_64
+# define bswap_64(x) __builtin_bswap64(x)
+#endif
+
+// Printing values in Little Endian
+void
+print_LE(IN const uint64_t *in, IN uint32_t bits_num);
+
+// Printing values in Big Endian
+void
+print_BE(IN const uint64_t *in, IN uint32_t bits_num);
+
+// Printing numbers is required only at verbose level 2 or above
+#if VERBOSE >= 2
+# ifdef PRINT_IN_BE
+// Print in Big Endian
+# define print(name, in, bits_num) \
+ do \
+ { \
+ EDMSG(name); \
+ print_BE(in, bits_num); \
+ } while(0)
+# else
+// Print in Little Endian
+# define print(name, in, bits_num) \
+ do \
+ { \
+ EDMSG(name); \
+ print_LE(in, bits_num); \
+ } while(0)
+# endif
+#else
+// No prints at all
+# define print(name, in, bits_num)
+#endif
+
+// Compare two byte arrays in constant time; returns 1 if equal, 0 otherwise
+_INLINE_ uint32_t
+secure_cmp(IN const uint8_t *a, IN const uint8_t *b, IN const uint32_t size)
+{
+ volatile uint8_t res = 0;
+
+ for(uint32_t i = 0; i < size; ++i)
+ {
+ res |= (a[i] ^ b[i]);
+ }
+
+ return (0 == res);
+}
+
+uint64_t
+r_bits_vector_weight(IN const r_t *in);
+
+// Constant time
+_INLINE_ uint32_t
+iszero(IN const uint8_t *s, IN const uint32_t len)
+{
+ volatile uint32_t res = 0;
+ for(uint64_t i = 0; i < len; i++)
+ {
+ res |= s[i];
+ }
+ return (0 == res);
+}
+
+// Returns the bit length of val: floor(log2(val)) + 1 for val > 0 (0 for val == 0)
+_INLINE_ uint8_t
+bit_scan_reverse(uint64_t val)
+{
+ // index never exceeds 64
+ uint8_t index = 0;
+
+ while(val != 0)
+ {
+ val >>= 1;
+ index++;
+ }
+
+ return index;
+}
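+
+// Examples (illustrative): bit_scan_reverse(1) = 1, bit_scan_reverse(5) = 3,
+// bit_scan_reverse(8) = 4. Hence MASK(bit_scan_reverse(len)) covers every
+// value in [0, len), which is what get_rand_mod_len relies on for its
+// rejection sampling.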
+
+// Returns 1 if equal, 0 otherwise
+_INLINE_ uint32_t
+secure_cmp32(IN const uint32_t v1, IN const uint32_t v2)
+{
+#if defined(__aarch64__)
+ uint32_t res;
+ __asm__ __volatile__("cmp %w1, %w2; \n "
+ "cset %w0, EQ; \n"
+ : "=r"(res)
+ : "r"(v1), "r"(v2)
+ :);
+ return res;
+#elif defined(__x86_64__) || defined(__i386__)
+ uint32_t res;
+ __asm__ __volatile__("xor %%edx, %%edx; \n"
+ "cmp %1, %2; \n "
+ "sete %%dl; \n"
+ "mov %%edx, %0; \n"
+ : "=r"(res)
+ : "r"(v1), "r"(v2)
+ : "rdx");
+ return res;
+#else
+ // Insecure comparison: The main purpose of secure_cmp32 is to avoid
+ // branches and thus to prevent potential side channel attacks. To do that
+ // we normally leverage some CPU special instructions such as "sete"
+ // (for __x86_64__) and "cset" (for __aarch64__). When dealing with general
+ // CPU architectures, the interpretation of the line below is left for the
+ // compiler, which may lead to an insecure branch.
+ return (v1 == v2 ? 1 : 0);
+#endif
+}
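+
+// A portable branchless alternative for the generic fallback above (a sketch,
+// not used elsewhere in this file): fold v1 ^ v2 through a 64-bit subtraction
+// so the equality indicator lands in the top bit.
+_INLINE_ uint32_t
+secure_cmp32_portable_sketch(IN const uint32_t v1, IN const uint32_t v2)
+{
+ // (x - 1) underflows to all-ones only when x == 0, i.e. only when v1 == v2.
+ const uint64_t x = (uint64_t)(v1 ^ v2);
+ return (uint32_t)((x - 1) >> 63);
+}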
+
+// Return 0 if v1 < v2, (-1) otherwise
+_INLINE_ uint32_t
+secure_l32_mask(IN const uint32_t v1, IN const uint32_t v2)
+{
+#if defined(__aarch64__)
+ uint32_t res;
+ __asm__ __volatile__("cmp %w2, %w1; \n "
+ "cset %w0, HI; \n"
+ : "=r"(res)
+ : "r"(v1), "r"(v2)
+ :);
+ return (res - 1);
+#elif defined(__x86_64__) || defined(__i386__)
+ uint32_t res;
+ __asm__ __volatile__("xor %%edx, %%edx; \n"
+ "cmp %1, %2; \n "
+ "setl %%dl; \n"
+ "dec %%edx; \n"
+ "mov %%edx, %0; \n"
+
+ : "=r"(res)
+ : "r"(v2), "r"(v1)
+ : "rdx");
+
+ return res;
+#else
+ // If v1 >= v2 then the subtraction result is 0^32||(v1-v2),
+ // otherwise it is 1^32||(2^32-(v2-v1)). Negating the upper
+ // 32 bits therefore gives 0 if v1 < v2 and (-1) otherwise.
+ return ~((uint32_t)(((uint64_t)v1 - (uint64_t)v2) >> 32));
+#endif
+}
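+
+// Worked example (illustrative): for v1 = 3, v2 = 5 the subtraction gives
+// (uint64_t)3 - 5 = 0xFFFFFFFFFFFFFFFE, whose upper 32 bits are all ones;
+// negating them yields 0, as required for v1 < v2. For v1 = 5, v2 = 3 the
+// upper 32 bits are zero and the function returns 0xFFFFFFFF, i.e. (-1).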