author    robot-piglet <robot-piglet@yandex-team.com>    2024-12-03 10:53:49 +0300
committer robot-piglet <robot-piglet@yandex-team.com>    2024-12-03 11:36:17 +0300
commit    ee6fb9c6075c7ace663657969cd59005b5d0b656 (patch)
tree      32bbd1063eab011d9ed28b692b012e71a838cbc5
parent    b0c2229a8d320931b2a986503a39f0f805956f11 (diff)
download  ydb-ee6fb9c6075c7ace663657969cd59005b5d0b656.tar.gz
Intermediate changes
commit_hash:a2b0ddba5d8e19d7862fd2fb5a4a7f704e38ec07
-rw-r--r--  contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py                    15
-rw-r--r--  contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report      5
-rw-r--r--  contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report        5
-rw-r--r--  contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c                1121
-rw-r--r--  contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c                 485
-rw-r--r--  contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_hash.c                220
-rw-r--r--  contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_hmac.c                132
-rw-r--r--  contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c        12
-rw-r--r--  contrib/restricted/aws/aws-c-cal/ya.make                                      14
9 files changed, 2008 insertions, 1 deletion
diff --git a/contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py b/contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py
index 0ead65e056..54f4b1b183 100644
--- a/contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py
+++ b/contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py
@@ -9,6 +9,7 @@ def post_install(self):
# Support Darwin.
linux_srcs = files(self.srcdir + "/source/unix/", rel=self.srcdir)
darwin_srcs = files(self.srcdir + "/source/darwin/", rel=self.srcdir)
+ windows_srcs = files(self.srcdir + "/source/windows/", rel=self.srcdir)
m.SRCS -= set(linux_srcs)
m.after(
"SRCS",
@@ -18,6 +19,15 @@ def post_install(self):
SRCS=darwin_srcs,
LDFLAGS=[Words("-framework", "Security")],
),
+ OS_WINDOWS=Linkable(SRCS=windows_srcs),
+ ),
+ )
+ m.after(
+ "CFLAGS",
+ Switch(
+ OS_WINDOWS=Linkable(
+ CFLAGS=["-DAWS_CAL_EXPORTS"],
+ ),
),
)
@@ -25,7 +35,10 @@ def post_install(self):
aws_c_cal = CMakeNinjaNixProject(
arcdir="contrib/restricted/aws/aws-c-cal",
nixattr="aws-c-cal",
- copy_sources=["source/darwin/"],
+ copy_sources=[
+ "source/darwin/",
+ "source/windows/",
+ ],
ignore_targets=["sha256_profile"],
post_install=post_install,
)
diff --git a/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report
index 118194b3f4..62a09c2294 100644
--- a/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report
+++ b/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report
@@ -68,6 +68,11 @@ BELONGS ya.make
source/unix/opensslcrypto_ecc.c [2:2]
source/unix/opensslcrypto_hash.c [2:2]
source/unix/opensslcrypto_hmac.c [2:2]
+ source/windows/bcrypt_aes.c [2:2]
+ source/windows/bcrypt_ecc.c [2:2]
+ source/windows/bcrypt_hash.c [2:2]
+ source/windows/bcrypt_hmac.c [2:2]
+ source/windows/bcrypt_platform_init.c [2:2]
KEEP COPYRIGHT_SERVICE_LABEL 9b3428451fa759287a2e04cd16a4619c
BELONGS ya.make
diff --git a/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report
index 09aea17122..ad73f02034 100644
--- a/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report
+++ b/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report
@@ -129,6 +129,11 @@ BELONGS ya.make
source/unix/opensslcrypto_ecc.c [3:3]
source/unix/opensslcrypto_hash.c [3:3]
source/unix/opensslcrypto_hmac.c [3:3]
+ source/windows/bcrypt_aes.c [3:3]
+ source/windows/bcrypt_ecc.c [3:3]
+ source/windows/bcrypt_hash.c [3:3]
+ source/windows/bcrypt_hmac.c [3:3]
+ source/windows/bcrypt_platform_init.c [3:3]
SKIP LicenseRef-scancode-generic-cla ee24fdc60600747c7d12c32055b0011d
BELONGS ya.make
diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c
new file mode 100644
index 0000000000..aeb646e66a
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c
@@ -0,0 +1,1121 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/private/symmetric_cipher_priv.h>
+
+#include <windows.h>
+
+/* keep the blank line above to prevent formatters from reordering this include with the windows.h header. */
+#include <bcrypt.h>
+
+#define NT_SUCCESS(status) ((NTSTATUS)status >= 0)
+
+/* handles for AES modes and algorithms we'll be using. These are initialized once and allowed to leak. */
+static aws_thread_once s_aes_thread_once = AWS_THREAD_ONCE_STATIC_INIT;
+static BCRYPT_ALG_HANDLE s_aes_cbc_algorithm_handle = NULL;
+static BCRYPT_ALG_HANDLE s_aes_gcm_algorithm_handle = NULL;
+static BCRYPT_ALG_HANDLE s_aes_ctr_algorithm_handle = NULL;
+static BCRYPT_ALG_HANDLE s_aes_keywrap_algorithm_handle = NULL;
+
+struct aes_bcrypt_cipher {
+ struct aws_symmetric_cipher cipher;
+ BCRYPT_ALG_HANDLE alg_handle;
+ /* the loaded key handle. */
+ BCRYPT_KEY_HANDLE key_handle;
+ /* Used for GCM mode to store IV, tag, and aad */
+ BCRYPT_AUTHENTICATED_CIPHER_MODE_INFO *auth_info_ptr;
+ /* Updated on the fly for things like constant-time CBC padding and GCM hash chaining */
+ DWORD cipher_flags;
+    /* For things to work, they have to be in 16 byte chunks in several scenarios. Use this
+       buffer for storing excess bytes until we have 16 bytes to operate on. */
+ struct aws_byte_buf overflow;
+    /* This gets updated as the algorithms run, so it isn't the original IV. That's why it's separate */
+ struct aws_byte_buf working_iv;
+ /* A buffer to keep around for the GMAC for GCM. */
+ struct aws_byte_buf working_mac_buffer;
+};
+
+static void s_load_alg_handles(void *user_data) {
+ (void)user_data;
+
+    /* this function is incredibly slow, LET IT LEAK */
+ NTSTATUS status = BCryptOpenAlgorithmProvider(&s_aes_cbc_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
+ AWS_FATAL_ASSERT(s_aes_cbc_algorithm_handle && "BCryptOpenAlgorithmProvider() failed");
+
+ status = BCryptSetProperty(
+ s_aes_cbc_algorithm_handle,
+ BCRYPT_CHAINING_MODE,
+ (PUCHAR)BCRYPT_CHAIN_MODE_CBC,
+ (ULONG)(wcslen(BCRYPT_CHAIN_MODE_CBC) + 1),
+ 0);
+
+ AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for CBC chaining mode failed");
+
+ /* Set up GCM algorithm */
+ status = BCryptOpenAlgorithmProvider(&s_aes_gcm_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
+ AWS_FATAL_ASSERT(s_aes_gcm_algorithm_handle && "BCryptOpenAlgorithmProvider() failed");
+
+ status = BCryptSetProperty(
+ s_aes_gcm_algorithm_handle,
+ BCRYPT_CHAINING_MODE,
+ (PUCHAR)BCRYPT_CHAIN_MODE_GCM,
+ (ULONG)(wcslen(BCRYPT_CHAIN_MODE_GCM) + 1),
+ 0);
+
+ AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for GCM chaining mode failed");
+
+ /* Setup CTR algorithm */
+ status = BCryptOpenAlgorithmProvider(&s_aes_ctr_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
+ AWS_FATAL_ASSERT(s_aes_ctr_algorithm_handle && "BCryptOpenAlgorithmProvider() failed");
+
+    /* This is ECB because Windows doesn't do CTR mode for you.
+       Instead we use ECB and XOR the encrypted IV with each block of data to operate on. */
+ status = BCryptSetProperty(
+ s_aes_ctr_algorithm_handle,
+ BCRYPT_CHAINING_MODE,
+ (PUCHAR)BCRYPT_CHAIN_MODE_ECB,
+ (ULONG)(wcslen(BCRYPT_CHAIN_MODE_ECB) + 1),
+ 0);
+
+ AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for ECB chaining mode failed");
+
+ /* Setup KEYWRAP algorithm */
+ status = BCryptOpenAlgorithmProvider(&s_aes_keywrap_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
+    AWS_FATAL_ASSERT(s_aes_keywrap_algorithm_handle && "BCryptOpenAlgorithmProvider() failed");
+
+    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptOpenAlgorithmProvider() for KeyWrap failed");
+}
+
+static BCRYPT_KEY_HANDLE s_import_key_blob(
+ BCRYPT_ALG_HANDLE algHandle,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *key) {
+ NTSTATUS status = 0;
+
+ BCRYPT_KEY_DATA_BLOB_HEADER key_data;
+ key_data.dwMagic = BCRYPT_KEY_DATA_BLOB_MAGIC;
+ key_data.dwVersion = BCRYPT_KEY_DATA_BLOB_VERSION1;
+ key_data.cbKeyData = (ULONG)key->len;
+
+ struct aws_byte_buf key_data_buf;
+ aws_byte_buf_init(&key_data_buf, allocator, sizeof(key_data) + key->len);
+ aws_byte_buf_write(&key_data_buf, (const uint8_t *)&key_data, sizeof(key_data));
+ aws_byte_buf_write(&key_data_buf, key->buffer, key->len);
+
+ BCRYPT_KEY_HANDLE key_handle;
+ status = BCryptImportKey(
+ algHandle, NULL, BCRYPT_KEY_DATA_BLOB, &key_handle, NULL, 0, key_data_buf.buffer, (ULONG)key_data_buf.len, 0);
+
+ aws_byte_buf_clean_up_secure(&key_data_buf);
+
+ if (!NT_SUCCESS(status)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ return key_handle;
+}
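+
+/* Blob layout consumed by BCryptImportKey above (illustrative):
+ *   [BCRYPT_KEY_DATA_BLOB_HEADER][raw key bytes]
+ * e.g. a 32-byte AES-256 key travels as a header with dwMagic = BCRYPT_KEY_DATA_BLOB_MAGIC,
+ * dwVersion = 1 and cbKeyData = 32, immediately followed by the 32 key bytes. */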
+
+static void s_aes_default_destroy(struct aws_symmetric_cipher *cipher) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ aws_byte_buf_clean_up_secure(&cipher->key);
+ aws_byte_buf_clean_up_secure(&cipher->iv);
+ aws_byte_buf_clean_up_secure(&cipher->tag);
+ aws_byte_buf_clean_up_secure(&cipher->aad);
+
+    /* clean_up_secure exists in versions of aws-c-common that don't check that the
+       buffer has a buffer and an allocator before freeing the memory, so check here
+       instead. If the allocator is set, the buffer was owned and needs to be cleaned up;
+       otherwise it was an alias and can just be dropped. */
+ if (cipher_impl->working_iv.allocator) {
+ aws_byte_buf_clean_up_secure(&cipher_impl->working_iv);
+ }
+
+ aws_byte_buf_clean_up_secure(&cipher_impl->overflow);
+ aws_byte_buf_clean_up_secure(&cipher_impl->working_mac_buffer);
+
+ if (cipher_impl->key_handle) {
+ BCryptDestroyKey(cipher_impl->key_handle);
+ cipher_impl->key_handle = NULL;
+ }
+
+ if (cipher_impl->auth_info_ptr) {
+ aws_mem_release(cipher->allocator, cipher_impl->auth_info_ptr);
+ cipher_impl->auth_info_ptr = NULL;
+ }
+
+ aws_mem_release(cipher->allocator, cipher_impl);
+}
+
+/* just a utility function for setting up Windows ciphers, keys, etc.
+   Handles copying key/IV data to the right buffers and then setting it
+   on the Windows handles used for the encryption operations. */
+static int s_initialize_cipher_materials(
+ struct aes_bcrypt_cipher *cipher,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv,
+ const struct aws_byte_cursor *tag,
+ const struct aws_byte_cursor *aad,
+ size_t iv_size,
+ bool is_ctr_mode,
+ bool is_gcm) {
+
+ if (!cipher->cipher.key.len) {
+ if (key) {
+ aws_byte_buf_init_copy_from_cursor(&cipher->cipher.key, cipher->cipher.allocator, *key);
+ } else {
+ aws_byte_buf_init(&cipher->cipher.key, cipher->cipher.allocator, AWS_AES_256_KEY_BYTE_LEN);
+ aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher.key);
+ }
+ }
+
+ if (!cipher->cipher.iv.len && iv_size) {
+ if (iv) {
+ aws_byte_buf_init_copy_from_cursor(&cipher->cipher.iv, cipher->cipher.allocator, *iv);
+ } else {
+ aws_byte_buf_init(&cipher->cipher.iv, cipher->cipher.allocator, iv_size);
+ aws_symmetric_cipher_generate_initialization_vector(iv_size, is_ctr_mode, &cipher->cipher.iv);
+ }
+ }
+
+ /* these fields are only used in GCM mode. */
+ if (is_gcm) {
+ if (!cipher->cipher.tag.len) {
+ if (tag) {
+ aws_byte_buf_init_copy_from_cursor(&cipher->cipher.tag, cipher->cipher.allocator, *tag);
+ } else {
+ aws_byte_buf_init(&cipher->cipher.tag, cipher->cipher.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
+ aws_byte_buf_secure_zero(&cipher->cipher.tag);
+ /* windows handles this, just go ahead and tell the API it's got a length. */
+ cipher->cipher.tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
+ }
+ }
+
+ if (!cipher->cipher.aad.len) {
+ if (aad) {
+ aws_byte_buf_init_copy_from_cursor(&cipher->cipher.aad, cipher->cipher.allocator, *aad);
+ }
+ }
+
+ if (!cipher->working_mac_buffer.len) {
+ aws_byte_buf_init(&cipher->working_mac_buffer, cipher->cipher.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
+ aws_byte_buf_secure_zero(&cipher->working_mac_buffer);
+ /* windows handles this, just go ahead and tell the API it's got a length. */
+ cipher->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
+ }
+ }
+
+ cipher->key_handle = s_import_key_blob(cipher->alg_handle, cipher->cipher.allocator, &cipher->cipher.key);
+
+ if (!cipher->key_handle) {
+ cipher->cipher.good = false;
+ return AWS_OP_ERR;
+ }
+
+ cipher->cipher_flags = 0;
+
+    /* In GCM mode, the IV is set on the auth info pointer and a working copy
+       is passed to each encrypt call. CBC and CTR mode function differently here:
+       the IV is set on the key itself. */
+ if (!is_gcm && cipher->cipher.iv.len) {
+ NTSTATUS status = BCryptSetProperty(
+ cipher->key_handle,
+ BCRYPT_INITIALIZATION_VECTOR,
+ cipher->cipher.iv.buffer,
+ (ULONG)cipher->cipher.iv.len,
+ 0);
+
+ if (!NT_SUCCESS(status)) {
+ cipher->cipher.good = false;
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ } else if (is_gcm) {
+
+ cipher->auth_info_ptr =
+ aws_mem_acquire(cipher->cipher.allocator, sizeof(BCRYPT_AUTHENTICATED_CIPHER_MODE_INFO));
+
+ /* Create a new authenticated cipher mode info object for GCM mode */
+ BCRYPT_INIT_AUTH_MODE_INFO(*cipher->auth_info_ptr);
+ cipher->auth_info_ptr->pbNonce = cipher->cipher.iv.buffer;
+ cipher->auth_info_ptr->cbNonce = (ULONG)cipher->cipher.iv.len;
+ cipher->auth_info_ptr->dwFlags = BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG;
+ cipher->auth_info_ptr->pbTag = cipher->cipher.tag.buffer;
+ cipher->auth_info_ptr->cbTag = (ULONG)cipher->cipher.tag.len;
+ cipher->auth_info_ptr->pbMacContext = cipher->working_mac_buffer.buffer;
+ cipher->auth_info_ptr->cbMacContext = (ULONG)cipher->working_mac_buffer.len;
+
+ if (cipher->cipher.aad.len) {
+ cipher->auth_info_ptr->pbAuthData = (PUCHAR)cipher->cipher.aad.buffer;
+ cipher->auth_info_ptr->cbAuthData = (ULONG)cipher->cipher.aad.len;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Free up as few resources as possible so we can quickly reuse the cipher. */
+static void s_clear_reusable_components(struct aws_symmetric_cipher *cipher) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+ bool working_iv_optimized = cipher->iv.buffer == cipher_impl->working_iv.buffer;
+
+ if (!working_iv_optimized) {
+ aws_byte_buf_secure_zero(&cipher_impl->working_iv);
+ }
+
+    /* These can't always be reused in the next operation, so go ahead and destroy them
+       and create new ones. */
+ if (cipher_impl->key_handle) {
+ BCryptDestroyKey(cipher_impl->key_handle);
+ cipher_impl->key_handle = NULL;
+ }
+
+ if (cipher_impl->auth_info_ptr) {
+ aws_mem_release(cipher->allocator, cipher_impl->auth_info_ptr);
+ cipher_impl->auth_info_ptr = NULL;
+ }
+
+ aws_byte_buf_secure_zero(&cipher_impl->overflow);
+ aws_byte_buf_secure_zero(&cipher_impl->working_mac_buffer);
+ /* windows handles this, just go ahead and tell the API it's got a length. */
+ cipher_impl->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
+}
+
+static int s_reset_cbc_cipher(struct aws_symmetric_cipher *cipher) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ s_clear_reusable_components(cipher);
+ return s_initialize_cipher_materials(
+ cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, false, false);
+}
+
+static int s_reset_ctr_cipher(struct aws_symmetric_cipher *cipher) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ s_clear_reusable_components(cipher);
+ struct aws_byte_cursor iv_cur = aws_byte_cursor_from_buf(&cipher->iv);
+ /* reset the working iv back to the original IV. We do this because
+ we're manually maintaining the counter. */
+ aws_byte_buf_append_dynamic(&cipher_impl->working_iv, &iv_cur);
+ return s_initialize_cipher_materials(
+ cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, true, false);
+}
+
+static int s_reset_gcm_cipher(struct aws_symmetric_cipher *cipher) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ s_clear_reusable_components(cipher);
+ return s_initialize_cipher_materials(
+ cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true);
+}
+
+static int s_aes_default_encrypt(
+ struct aws_symmetric_cipher *cipher,
+ const struct aws_byte_cursor *to_encrypt,
+ struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ if (to_encrypt->len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ size_t predicted_write_length =
+ cipher_impl->cipher_flags & BCRYPT_BLOCK_PADDING
+ ? to_encrypt->len + (AWS_AES_256_CIPHER_BLOCK_SIZE - (to_encrypt->len % AWS_AES_256_CIPHER_BLOCK_SIZE))
+ : to_encrypt->len;
+
+ ULONG length_written = (ULONG)(predicted_write_length);
+
+ if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, predicted_write_length)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ PUCHAR iv = NULL;
+ ULONG iv_size = 0;
+
+ if (cipher_impl->auth_info_ptr) {
+ iv = cipher_impl->working_iv.buffer;
+        /* this is looking for buffer size, and the working_iv has only been written to by Windows in the
+         * GCM case. So use capacity rather than length */
+ iv_size = (ULONG)cipher_impl->working_iv.capacity;
+ }
+
+ /* iv was set on the key itself, so we don't need to pass it here. */
+ NTSTATUS status = BCryptEncrypt(
+ cipher_impl->key_handle,
+ to_encrypt->ptr,
+ (ULONG)to_encrypt->len,
+ cipher_impl->auth_info_ptr,
+ iv,
+ iv_size,
+ out->buffer + out->len,
+ (ULONG)(out->capacity - out->len),
+ &length_written,
+ cipher_impl->cipher_flags);
+
+ if (!NT_SUCCESS(status)) {
+ cipher->good = false;
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ out->len += length_written;
+ return AWS_OP_SUCCESS;
+}
+
+/* manages making sure encryption operations can operate on 16 byte blocks. Stores the excess in the overflow
+ buffer and moves stuff around each time to make sure everything is in order. */
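+/* Worked example (illustrative): with an empty overflow and 40 bytes passed in,
+   RESERVE_SIZE below is 32, so the call returns 32 bytes to operate on and parks
+   the trailing 8 bytes in the overflow for the next update or finalize call. */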
+static struct aws_byte_buf s_fill_in_overflow(
+ struct aws_symmetric_cipher *cipher,
+ const struct aws_byte_cursor *to_operate) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ static const size_t RESERVE_SIZE = AWS_AES_256_CIPHER_BLOCK_SIZE * 2;
+ cipher_impl->cipher_flags = 0;
+
+ struct aws_byte_buf final_to_operate_on;
+ AWS_ZERO_STRUCT(final_to_operate_on);
+
+ if (cipher_impl->overflow.len > 0) {
+ aws_byte_buf_init_copy(&final_to_operate_on, cipher->allocator, &cipher_impl->overflow);
+ aws_byte_buf_append_dynamic(&final_to_operate_on, to_operate);
+ aws_byte_buf_secure_zero(&cipher_impl->overflow);
+ } else {
+ aws_byte_buf_init_copy_from_cursor(&final_to_operate_on, cipher->allocator, *to_operate);
+ }
+
+ size_t overflow = final_to_operate_on.len % RESERVE_SIZE;
+
+ if (final_to_operate_on.len > RESERVE_SIZE) {
+ size_t offset = overflow == 0 ? RESERVE_SIZE : overflow;
+
+ struct aws_byte_cursor slice_for_overflow = aws_byte_cursor_from_buf(&final_to_operate_on);
+ aws_byte_cursor_advance(&slice_for_overflow, final_to_operate_on.len - offset);
+ aws_byte_buf_append_dynamic(&cipher_impl->overflow, &slice_for_overflow);
+ final_to_operate_on.len -= offset;
+ } else {
+ struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_operate_on);
+ aws_byte_buf_append_dynamic(&cipher_impl->overflow, &final_cur);
+ aws_byte_buf_clean_up_secure(&final_to_operate_on);
+ }
+
+ return final_to_operate_on;
+}
+
+static int s_aes_cbc_encrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_encrypt,
+ struct aws_byte_buf *out) {
+
+ struct aws_byte_buf final_to_encrypt = s_fill_in_overflow(cipher, &to_encrypt);
+ struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_encrypt);
+ int ret_val = s_aes_default_encrypt(cipher, &final_cur, out);
+ aws_byte_buf_clean_up_secure(&final_to_encrypt);
+
+ return ret_val;
+}
+
+static int s_aes_cbc_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ if (cipher->good && cipher_impl->overflow.len > 0) {
+ cipher_impl->cipher_flags = BCRYPT_BLOCK_PADDING;
+ /* take the rest of the overflow and turn padding on so the remainder is properly padded
+ without timing attack vulnerabilities. */
+ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
+ int ret_val = s_aes_default_encrypt(cipher, &remaining_cur, out);
+ aws_byte_buf_secure_zero(&cipher_impl->overflow);
+ return ret_val;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_default_aes_decrypt(
+ struct aws_symmetric_cipher *cipher,
+ const struct aws_byte_cursor *to_decrypt,
+ struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ if (to_decrypt->len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ PUCHAR iv = NULL;
+ ULONG iv_size = 0;
+
+ if (cipher_impl->auth_info_ptr) {
+ iv = cipher_impl->working_iv.buffer;
+        /* this is looking for buffer size, and the working_iv has only been written to by Windows in the
+         * GCM case. So use capacity rather than length */
+ iv_size = (ULONG)cipher_impl->working_iv.capacity;
+ }
+
+ size_t predicted_write_length = to_decrypt->len;
+ ULONG length_written = (ULONG)(predicted_write_length);
+
+ if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, predicted_write_length)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* iv was set on the key itself, so we don't need to pass it here. */
+ NTSTATUS status = BCryptDecrypt(
+ cipher_impl->key_handle,
+ to_decrypt->ptr,
+ (ULONG)to_decrypt->len,
+ cipher_impl->auth_info_ptr,
+ iv,
+ iv_size,
+ out->buffer + out->len,
+ (ULONG)(out->capacity - out->len),
+ &length_written,
+ cipher_impl->cipher_flags);
+
+ if (!NT_SUCCESS(status)) {
+ cipher->good = false;
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ out->len += length_written;
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aes_cbc_decrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_decrypt,
+ struct aws_byte_buf *out) {
+ struct aws_byte_buf final_to_decrypt = s_fill_in_overflow(cipher, &to_decrypt);
+ struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_decrypt);
+ int ret_val = s_default_aes_decrypt(cipher, &final_cur, out);
+ aws_byte_buf_clean_up_secure(&final_to_decrypt);
+
+ return ret_val;
+}
+
+static int s_aes_cbc_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ if (cipher->good && cipher_impl->overflow.len > 0) {
+ cipher_impl->cipher_flags = BCRYPT_BLOCK_PADDING;
+ /* take the rest of the overflow and turn padding on so the remainder is properly padded
+ without timing attack vulnerabilities. */
+ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
+ int ret_val = s_default_aes_decrypt(cipher, &remaining_cur, out);
+ aws_byte_buf_secure_zero(&cipher_impl->overflow);
+ return ret_val;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_symmetric_cipher_vtable s_aes_cbc_vtable = {
+ .alg_name = "AES-CBC 256",
+ .provider = "Windows CNG",
+ .decrypt = s_aes_cbc_decrypt,
+ .encrypt = s_aes_cbc_encrypt,
+ .finalize_encryption = s_aes_cbc_finalize_encryption,
+ .finalize_decryption = s_aes_cbc_finalize_decryption,
+ .destroy = s_aes_default_destroy,
+ .reset = s_reset_cbc_cipher,
+};
+
+struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv) {
+
+ aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL);
+
+ struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher));
+
+ cipher->cipher.allocator = allocator;
+ cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
+ cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
+ cipher->alg_handle = s_aes_cbc_algorithm_handle;
+ cipher->cipher.vtable = &s_aes_cbc_vtable;
+
+ if (s_initialize_cipher_materials(cipher, key, iv, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, false, false) !=
+ AWS_OP_SUCCESS) {
+ goto error;
+ }
+
+ aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2);
+ cipher->working_iv = cipher->cipher.iv;
+ /* make sure the cleanup doesn't do anything. */
+ cipher->working_iv.allocator = NULL;
+ cipher->cipher.impl = cipher;
+ cipher->cipher.good = true;
+
+ return &cipher->cipher;
+
+error:
+ return NULL;
+}
+
+/* the buffer management for this mode is a good deal easier because we don't care about padding.
+   We do care about holding back the final block (plus any partial remainder) until the finalize
+   call so we can turn the auth chaining flag off and compute the GMAC correctly. */
+static int s_aes_gcm_encrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_encrypt,
+ struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ if (to_encrypt.len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_buf working_buffer;
+ AWS_ZERO_STRUCT(working_buffer);
+
+ /* If there's overflow, prepend it to the working buffer, then append the data to encrypt */
+ if (cipher_impl->overflow.len) {
+ struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
+
+ aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur);
+ aws_byte_buf_reset(&cipher_impl->overflow, true);
+ aws_byte_buf_append_dynamic(&working_buffer, &to_encrypt);
+ } else {
+ aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_encrypt);
+ }
+
+ int ret_val = AWS_OP_ERR;
+
+ /* whatever is remaining in an incomplete block, copy it to the overflow. If we don't have a full block
+ wait til next time or for the finalize call. */
+ if (working_buffer.len > AWS_AES_256_CIPHER_BLOCK_SIZE) {
+ size_t offset = working_buffer.len % AWS_AES_256_CIPHER_BLOCK_SIZE;
+ size_t seek_to = working_buffer.len - (AWS_AES_256_CIPHER_BLOCK_SIZE + offset);
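+        /* e.g. (illustrative) 40 buffered bytes: offset = 8, seek_to = 16, so 16 bytes
+           are encrypted now and 24 are held back for a later update or the finalize. */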
+ struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer);
+ struct aws_byte_cursor working_slice = aws_byte_cursor_advance(&working_buf_cur, seek_to);
+ /* this is just here to make it obvious. The previous line advanced working_buf_cur to where the
+           new overflow should be. */
+ struct aws_byte_cursor new_overflow_cur = working_buf_cur;
+ aws_byte_buf_append_dynamic(&cipher_impl->overflow, &new_overflow_cur);
+
+ ret_val = s_aes_default_encrypt(cipher, &working_slice, out);
+ } else {
+ struct aws_byte_cursor working_buffer_cur = aws_byte_cursor_from_buf(&working_buffer);
+ aws_byte_buf_append_dynamic(&cipher_impl->overflow, &working_buffer_cur);
+ ret_val = AWS_OP_SUCCESS;
+ }
+ aws_byte_buf_clean_up_secure(&working_buffer);
+ return ret_val;
+}
+
+static int s_aes_gcm_decrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_decrypt,
+ struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ if (to_decrypt.len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_buf working_buffer;
+ AWS_ZERO_STRUCT(working_buffer);
+
+ /* If there's overflow, prepend it to the working buffer, then append the data to encrypt */
+ if (cipher_impl->overflow.len) {
+ struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
+
+ aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur);
+ aws_byte_buf_reset(&cipher_impl->overflow, true);
+ aws_byte_buf_append_dynamic(&working_buffer, &to_decrypt);
+ } else {
+ aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_decrypt);
+ }
+
+ int ret_val = AWS_OP_ERR;
+
+ /* whatever is remaining in an incomplete block, copy it to the overflow. If we don't have a full block
+ wait til next time or for the finalize call. */
+ if (working_buffer.len > AWS_AES_256_CIPHER_BLOCK_SIZE) {
+ size_t offset = working_buffer.len % AWS_AES_256_CIPHER_BLOCK_SIZE;
+ size_t seek_to = working_buffer.len - (AWS_AES_256_CIPHER_BLOCK_SIZE + offset);
+ struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer);
+ struct aws_byte_cursor working_slice = aws_byte_cursor_advance(&working_buf_cur, seek_to);
+ /* this is just here to make it obvious. The previous line advanced working_buf_cur to where the
+           new overflow should be. */
+ struct aws_byte_cursor new_overflow_cur = working_buf_cur;
+ aws_byte_buf_append_dynamic(&cipher_impl->overflow, &new_overflow_cur);
+
+ ret_val = s_default_aes_decrypt(cipher, &working_slice, out);
+ } else {
+ struct aws_byte_cursor working_buffer_cur = aws_byte_cursor_from_buf(&working_buffer);
+ aws_byte_buf_append_dynamic(&cipher_impl->overflow, &working_buffer_cur);
+ ret_val = AWS_OP_SUCCESS;
+ }
+ aws_byte_buf_clean_up_secure(&working_buffer);
+ return ret_val;
+}
+
+static int s_aes_gcm_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ cipher_impl->auth_info_ptr->dwFlags &= ~BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG;
+ /* take whatever is remaining, make the final encrypt call with the auth chain flag turned off. */
+ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
+ int ret_val = s_aes_default_encrypt(cipher, &remaining_cur, out);
+ aws_byte_buf_secure_zero(&cipher_impl->overflow);
+ aws_byte_buf_secure_zero(&cipher_impl->working_iv);
+ return ret_val;
+}
+
+static int s_aes_gcm_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+ cipher_impl->auth_info_ptr->dwFlags &= ~BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG;
+ /* take whatever is remaining, make the final decrypt call with the auth chain flag turned off. */
+ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
+ int ret_val = s_default_aes_decrypt(cipher, &remaining_cur, out);
+ aws_byte_buf_secure_zero(&cipher_impl->overflow);
+ aws_byte_buf_secure_zero(&cipher_impl->working_iv);
+ return ret_val;
+}
+
+static struct aws_symmetric_cipher_vtable s_aes_gcm_vtable = {
+ .alg_name = "AES-GCM 256",
+ .provider = "Windows CNG",
+ .decrypt = s_aes_gcm_decrypt,
+ .encrypt = s_aes_gcm_encrypt,
+ .finalize_encryption = s_aes_gcm_finalize_encryption,
+ .finalize_decryption = s_aes_gcm_finalize_decryption,
+ .destroy = s_aes_default_destroy,
+ .reset = s_reset_gcm_cipher,
+};
+
+struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv,
+ const struct aws_byte_cursor *aad,
+ const struct aws_byte_cursor *decryption_tag) {
+
+ aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL);
+ struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher));
+
+ cipher->cipher.allocator = allocator;
+ cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
+ cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
+ cipher->alg_handle = s_aes_gcm_algorithm_handle;
+ cipher->cipher.vtable = &s_aes_gcm_vtable;
+
+ /* GCM does the counting under the hood, so we let it handle the final 4 bytes of the IV. */
+ if (s_initialize_cipher_materials(
+ cipher, key, iv, decryption_tag, aad, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true) != AWS_OP_SUCCESS) {
+ goto error;
+ }
+
+ aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2);
+ aws_byte_buf_init(&cipher->working_iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
+ aws_byte_buf_secure_zero(&cipher->working_iv);
+
+ cipher->cipher.impl = cipher;
+ cipher->cipher.good = true;
+
+ return &cipher->cipher;
+
+error:
+ if (cipher != NULL) {
+ s_aes_default_destroy(&cipher->cipher);
+ }
+
+ return NULL;
+}
+
+/* Take a and b, XOR them and store it in dest. Notice the XOR is done up to the length of the smallest input.
+ If there's a bug in here, it's being hit inside the finalize call when there's an input stream that isn't an even
+ multiple of 16.
+ */
+static int s_xor_cursors(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b, struct aws_byte_buf *dest) {
+ size_t min_size = aws_min_size(b->len, a->len);
+
+ if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(dest, min_size)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* If the profiler is saying this is slow, SIMD the loop below. */
+ uint8_t *array_ref = dest->buffer + dest->len;
+
+ for (size_t i = 0; i < min_size; ++i) {
+ array_ref[i] = a->ptr[i] ^ b->ptr[i];
+ }
+
+ dest->len += min_size;
+
+ return AWS_OP_SUCCESS;
+}
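+
+/* e.g. (illustrative) a = {0x0f, 0xf0, 0xaa} and b = {0xff, 0xff}: min_size is 2,
+   so {0xf0, 0x0f} is appended to dest and the third byte of a is ignored. */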
+
+/* There is no CTR mode on Windows. Instead, we use AES ECB to encrypt the IV a block at a time.
+ That value is then XOR'd with the to_encrypt cursor and appended to out. The counter then needs
+ to be incremented by 1 for the next call. This has to be done a block at a time, so we slice
+ to_encrypt into a cursor per block and do this process for each block. Also notice that CTR mode
+ is symmetric for encryption and decryption (encrypt and decrypt are the same thing). */
+static int s_aes_ctr_encrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_encrypt,
+ struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ if (to_encrypt.len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_buf working_buffer;
+ AWS_ZERO_STRUCT(working_buffer);
+
+ /* prepend overflow to the working buffer and then append to_encrypt to it. */
+ if (cipher_impl->overflow.len && to_encrypt.ptr != cipher_impl->overflow.buffer) {
+ struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
+ aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur);
+ aws_byte_buf_reset(&cipher_impl->overflow, true);
+ aws_byte_buf_append_dynamic(&working_buffer, &to_encrypt);
+ } else {
+ aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_encrypt);
+ }
+
+ /* slice working_buffer into a slice per block. */
+ struct aws_array_list sliced_buffers;
+ aws_array_list_init_dynamic(
+ &sliced_buffers,
+ cipher->allocator,
+ (to_encrypt.len / AWS_AES_256_CIPHER_BLOCK_SIZE) + 1,
+ sizeof(struct aws_byte_cursor));
+
+ struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer);
+ while (working_buf_cur.len) {
+ struct aws_byte_cursor slice = working_buf_cur;
+
+ if (working_buf_cur.len >= AWS_AES_256_CIPHER_BLOCK_SIZE) {
+ slice = aws_byte_cursor_advance(&working_buf_cur, AWS_AES_256_CIPHER_BLOCK_SIZE);
+ } else {
+ aws_byte_cursor_advance(&working_buf_cur, slice.len);
+ }
+
+ aws_array_list_push_back(&sliced_buffers, &slice);
+ }
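+    /* e.g. (illustrative) a 37-byte working buffer slices into cursors of 16, 16
+       and 5 bytes; the loop below processes the full blocks and routes the 5-byte
+       tail into the overflow until the finalize call. */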
+
+ int ret_val = AWS_OP_ERR;
+
+ size_t sliced_buffers_cnt = aws_array_list_length(&sliced_buffers);
+
+ /* for each slice, if it's a full block, do ECB on the IV, xor it to the slice, and then increment the counter. */
+ for (size_t i = 0; i < sliced_buffers_cnt; ++i) {
+ struct aws_byte_cursor buffer_cur;
+ AWS_ZERO_STRUCT(buffer_cur);
+
+ aws_array_list_get_at(&sliced_buffers, &buffer_cur, i);
+ if (buffer_cur.len == AWS_AES_256_CIPHER_BLOCK_SIZE ||
+ /* this part of the branch is for handling the finalize call, which does not have to be on an even
+ block boundary. */
+            (cipher_impl->overflow.len > 0 && sliced_buffers_cnt == 1)) {
+
+ ULONG lengthWritten = (ULONG)AWS_AES_256_CIPHER_BLOCK_SIZE;
+ uint8_t temp_buffer[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0};
+ struct aws_byte_cursor temp_cur = aws_byte_cursor_from_array(temp_buffer, sizeof(temp_buffer));
+
+ NTSTATUS status = BCryptEncrypt(
+ cipher_impl->key_handle,
+ cipher_impl->working_iv.buffer,
+ (ULONG)cipher_impl->working_iv.len,
+ NULL,
+ NULL,
+ 0,
+ temp_cur.ptr,
+ (ULONG)temp_cur.len,
+ &lengthWritten,
+ cipher_impl->cipher_flags);
+
+ if (!NT_SUCCESS(status)) {
+ cipher->good = false;
+ ret_val = aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto clean_up;
+ }
+
+ /* this does the XOR, after this call the final encrypted output is added to out. */
+ if (s_xor_cursors(&buffer_cur, &temp_cur, out)) {
+ ret_val = AWS_OP_ERR;
+ goto clean_up;
+ }
+
+ /* increment the counter. Get the buffers aligned for it first though. */
+ size_t counter_offset = AWS_AES_256_CIPHER_BLOCK_SIZE - sizeof(uint32_t);
+ struct aws_byte_buf counter_buf = cipher_impl->working_iv;
+ /* roll it back 4 so the write works. */
+ counter_buf.len = counter_offset;
+ struct aws_byte_cursor counter_cur = aws_byte_cursor_from_buf(&cipher_impl->working_iv);
+ aws_byte_cursor_advance(&counter_cur, counter_offset);
+
+ /* read current counter value as a Big-endian 32-bit integer*/
+ uint32_t counter = 0;
+ aws_byte_cursor_read_be32(&counter_cur, &counter);
+
+ /* check for overflow here. */
+ if (aws_add_u32_checked(counter, 1, &counter) != AWS_OP_SUCCESS) {
+ cipher->good = false;
+ ret_val = AWS_OP_ERR;
+ goto clean_up;
+ }
+ /* put the incremented counter back. */
+ aws_byte_buf_write_be32(&counter_buf, counter);
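+            /* e.g. (illustrative) a working IV ending in 00 00 00 2a now ends in 00 00 00 2b. */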
+ } else {
+ /* otherwise dump it into the overflow and wait til the next call */
+ aws_byte_buf_append_dynamic(&cipher_impl->overflow, &buffer_cur);
+ }
+
+ ret_val = AWS_OP_SUCCESS;
+ }
+
+clean_up:
+ aws_array_list_clean_up_secure(&sliced_buffers);
+ aws_byte_buf_clean_up_secure(&working_buffer);
+
+ return ret_val;
+}
+
+static int s_aes_ctr_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow);
+ /* take the final overflow, and do the final encrypt call for it. */
+ int ret_val = s_aes_ctr_encrypt(cipher, remaining_cur, out);
+ aws_byte_buf_secure_zero(&cipher_impl->overflow);
+ aws_byte_buf_secure_zero(&cipher_impl->working_iv);
+ return ret_val;
+}
+
+static struct aws_symmetric_cipher_vtable s_aes_ctr_vtable = {
+ .alg_name = "AES-CTR 256",
+ .provider = "Windows CNG",
+ .decrypt = s_aes_ctr_encrypt,
+ .encrypt = s_aes_ctr_encrypt,
+ .finalize_encryption = s_aes_ctr_finalize_encryption,
+ .finalize_decryption = s_aes_ctr_finalize_encryption,
+ .destroy = s_aes_default_destroy,
+ .reset = s_reset_ctr_cipher,
+};
+
+struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv) {
+
+ aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL);
+ struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher));
+
+ cipher->cipher.allocator = allocator;
+ cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
+ cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
+ cipher->alg_handle = s_aes_ctr_algorithm_handle;
+ cipher->cipher.vtable = &s_aes_ctr_vtable;
+
+ if (s_initialize_cipher_materials(cipher, key, iv, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, true, false) !=
+ AWS_OP_SUCCESS) {
+ goto error;
+ }
+
+ aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2);
+ aws_byte_buf_init_copy(&cipher->working_iv, allocator, &cipher->cipher.iv);
+
+ cipher->cipher.impl = cipher;
+ cipher->cipher.good = true;
+
+ return &cipher->cipher;
+
+error:
+ if (cipher != NULL) {
+ s_aes_default_destroy(&cipher->cipher);
+ }
+
+ return NULL;
+}
+
+/* This is just the key material to wrap or unwrap. Append it to a buffer and on finalize export/import the key using AES keywrap. */
+static int s_key_wrap_encrypt_decrypt(
+ struct aws_symmetric_cipher *cipher,
+ const struct aws_byte_cursor input,
+ struct aws_byte_buf *out) {
+ (void)out;
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ return aws_byte_buf_append_dynamic(&cipher_impl->overflow, &input);
+}
+
+/* Import the buffer we've been appending to as an AES key. Then export it using AES Keywrap format. */
+static int s_keywrap_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ BCRYPT_KEY_HANDLE key_handle_to_encrypt =
+ s_import_key_blob(s_aes_keywrap_algorithm_handle, cipher->allocator, &cipher_impl->overflow);
+
+ if (!key_handle_to_encrypt) {
+ return AWS_OP_ERR;
+ }
+
+ NTSTATUS status = 0;
+
+ ULONG output_size = 0;
+ /* Call with NULL first to get the required size. */
+ status = BCryptExportKey(
+ key_handle_to_encrypt, cipher_impl->key_handle, BCRYPT_AES_WRAP_KEY_BLOB, NULL, 0, &output_size, 0);
+
+ if (!NT_SUCCESS(status)) {
+ cipher->good = false;
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ int ret_val = AWS_OP_ERR;
+
+ if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, output_size)) {
+ goto clean_up;
+ }
+
+ /* now actually export the key */
+ ULONG len_written = 0;
+ status = BCryptExportKey(
+ key_handle_to_encrypt,
+ cipher_impl->key_handle,
+ BCRYPT_AES_WRAP_KEY_BLOB,
+ out->buffer + out->len,
+ output_size,
+ &len_written,
+ 0);
+
+ if (!NT_SUCCESS(status)) {
+ cipher->good = false;
+ goto clean_up;
+ }
+
+ out->len += len_written;
+
+ ret_val = AWS_OP_SUCCESS;
+
+clean_up:
+ if (key_handle_to_encrypt) {
+ BCryptDestroyKey(key_handle_to_encrypt);
+ }
+
+ return ret_val;
+}
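+
+/* Note (illustrative): BCRYPT_AES_WRAP_KEY_BLOB follows RFC 3394 key wrap, so the
+   exported blob is the wrapped key plus an 8-byte integrity block, e.g. a 32-byte
+   key exports as 40 bytes here. */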
+
+/* Import the buffer we've been appending to as an AES Key Wrapped key. Then export the raw AES key. */
+
+static int s_keywrap_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ BCRYPT_KEY_HANDLE import_key = NULL;
+
+ /* use the cipher key to import the buffer as an AES keywrapped key. */
+ NTSTATUS status = BCryptImportKey(
+ s_aes_keywrap_algorithm_handle,
+ cipher_impl->key_handle,
+ BCRYPT_AES_WRAP_KEY_BLOB,
+ &import_key,
+ NULL,
+ 0,
+ cipher_impl->overflow.buffer,
+ (ULONG)cipher_impl->overflow.len,
+ 0);
+ int ret_val = AWS_OP_ERR;
+
+ if (NT_SUCCESS(status) && import_key) {
+ ULONG export_size = 0;
+
+ struct aws_byte_buf key_data_blob;
+ aws_byte_buf_init(
+ &key_data_blob, cipher->allocator, sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) + cipher_impl->overflow.len);
+
+ /* Now just export the key out as a raw AES key. */
+ status = BCryptExportKey(
+ import_key,
+ NULL,
+ BCRYPT_KEY_DATA_BLOB,
+ key_data_blob.buffer,
+ (ULONG)key_data_blob.capacity,
+ &export_size,
+ 0);
+
+ key_data_blob.len += export_size;
+
+ if (NT_SUCCESS(status)) {
+
+ if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, export_size)) {
+ goto clean_up;
+ }
+
+ BCRYPT_KEY_DATA_BLOB_HEADER *stream_header = (BCRYPT_KEY_DATA_BLOB_HEADER *)key_data_blob.buffer;
+
+            AWS_FATAL_ASSERT(
+                aws_byte_buf_write(
+                    out, key_data_blob.buffer + sizeof(BCRYPT_KEY_DATA_BLOB_HEADER), stream_header->cbKeyData) &&
+                "Copying key data failed but the allocation should have already occurred successfully");
+ ret_val = AWS_OP_SUCCESS;
+
+ } else {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ cipher->good = false;
+ }
+
+ clean_up:
+ aws_byte_buf_clean_up_secure(&key_data_blob);
+ BCryptDestroyKey(import_key);
+
+ } else {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ cipher->good = false;
+ }
+
+ return ret_val;
+}
+
+static int s_reset_keywrap_cipher(struct aws_symmetric_cipher *cipher) {
+ struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+ s_clear_reusable_components(cipher);
+
+ return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, NULL, 0, false, false);
+}
+
+static struct aws_symmetric_cipher_vtable s_aes_keywrap_vtable = {
+ .alg_name = "AES-KEYWRAP 256",
+ .provider = "Windows CNG",
+ .decrypt = s_key_wrap_encrypt_decrypt,
+ .encrypt = s_key_wrap_encrypt_decrypt,
+ .finalize_encryption = s_keywrap_finalize_encryption,
+ .finalize_decryption = s_keywrap_finalize_decryption,
+ .destroy = s_aes_default_destroy,
+ .reset = s_reset_keywrap_cipher,
+};
+
+struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key) {
+
+ aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL);
+ struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher));
+
+ cipher->cipher.allocator = allocator;
+ cipher->cipher.block_size = 8;
+ cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
+ cipher->alg_handle = s_aes_keywrap_algorithm_handle;
+ cipher->cipher.vtable = &s_aes_keywrap_vtable;
+
+ if (s_initialize_cipher_materials(cipher, key, NULL, NULL, NULL, 0, false, false) != AWS_OP_SUCCESS) {
+ goto error;
+ }
+
+ aws_byte_buf_init(&cipher->overflow, allocator, (AWS_AES_256_CIPHER_BLOCK_SIZE * 2) + 8);
+
+ cipher->cipher.impl = cipher;
+ cipher->cipher.good = true;
+
+ return &cipher->cipher;
+
+error:
+ if (cipher != NULL) {
+ s_aes_default_destroy(&cipher->cipher);
+ }
+
+ return NULL;
+}
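A minimal usage sketch, not part of the diff: how the public aws-c-cal API is expected to drive the CBC implementation above (aws_aes_cbc_256_new dispatches to aws_aes_cbc_256_new_impl on Windows). Error handling is abbreviated.

    #include <aws/cal/symmetric_cipher.h>
    #include <aws/common/byte_buf.h>

    static int s_cbc_roundtrip_sketch(struct aws_allocator *alloc) {
        /* NULL key and IV ask the implementation to generate random ones. */
        struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(alloc, NULL, NULL);
        if (!cipher) {
            return AWS_OP_ERR;
        }

        struct aws_byte_cursor plaintext = aws_byte_cursor_from_c_str("hello bcrypt");
        struct aws_byte_buf ciphertext;
        aws_byte_buf_init(&ciphertext, alloc, plaintext.len + AWS_AES_256_CIPHER_BLOCK_SIZE);

        /* Streaming shape: any number of encrypt calls, then one finalize that
           flushes the overflow with BCRYPT_BLOCK_PADDING set. */
        int rv = aws_symmetric_cipher_encrypt(cipher, plaintext, &ciphertext);
        if (rv == AWS_OP_SUCCESS) {
            rv = aws_symmetric_cipher_finalize_encryption(cipher, &ciphertext);
        }

        aws_byte_buf_clean_up_secure(&ciphertext);
        aws_symmetric_cipher_destroy(cipher);
        return rv;
    }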
diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c
new file mode 100644
index 0000000000..a9e890d055
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c
@@ -0,0 +1,485 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/private/ecc.h>
+
+#include <aws/cal/cal.h>
+#include <aws/cal/private/der.h>
+
+#include <aws/common/thread.h>
+
+#include <windows.h>
+
+#include <bcrypt.h>
+
+#include <winerror.h>
+
+static BCRYPT_ALG_HANDLE s_ecdsa_p256_alg = NULL;
+static BCRYPT_ALG_HANDLE s_ecdsa_p384_alg = NULL;
+
+/* size of the P384 curve's signatures. This is the largest we support at the moment.
+ Since msvc doesn't support variable length arrays, we need to handle this with a macro. */
+#define MAX_SIGNATURE_LENGTH (48 * 2)
+
+static aws_thread_once s_ecdsa_thread_once = AWS_THREAD_ONCE_STATIC_INIT;
+
+static void s_load_alg_handle(void *user_data) {
+ (void)user_data;
+    /* this function is incredibly slow, LET IT LEAK */
+ NTSTATUS status =
+ BCryptOpenAlgorithmProvider(&s_ecdsa_p256_alg, BCRYPT_ECDSA_P256_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0);
+ AWS_ASSERT(s_ecdsa_p256_alg && "BCryptOpenAlgorithmProvider() failed");
+
+ status = BCryptOpenAlgorithmProvider(&s_ecdsa_p384_alg, BCRYPT_ECDSA_P384_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0);
+ AWS_ASSERT(s_ecdsa_p384_alg && "BCryptOpenAlgorithmProvider() failed");
+
+ (void)status;
+}
+
+struct bcrypt_ecc_key_pair {
+ struct aws_ecc_key_pair key_pair;
+ BCRYPT_KEY_HANDLE key_handle;
+};
+
+static BCRYPT_ALG_HANDLE s_key_alg_handle_from_curve_name(enum aws_ecc_curve_name curve_name) {
+ switch (curve_name) {
+ case AWS_CAL_ECDSA_P256:
+ return s_ecdsa_p256_alg;
+ case AWS_CAL_ECDSA_P384:
+ return s_ecdsa_p384_alg;
+ default:
+ return 0;
+ }
+}
+
+static ULONG s_get_magic_from_curve_name(enum aws_ecc_curve_name curve_name, bool private_key) {
+ switch (curve_name) {
+ case AWS_CAL_ECDSA_P256:
+ return private_key ? BCRYPT_ECDSA_PRIVATE_P256_MAGIC : BCRYPT_ECDSA_PUBLIC_P256_MAGIC;
+ case AWS_CAL_ECDSA_P384:
+ return private_key ? BCRYPT_ECDSA_PRIVATE_P384_MAGIC : BCRYPT_ECDSA_PUBLIC_P384_MAGIC;
+ default:
+ return 0;
+ }
+}
+
+static void s_destroy_key(struct aws_ecc_key_pair *key_pair) {
+ if (key_pair) {
+ struct bcrypt_ecc_key_pair *key_impl = key_pair->impl;
+
+ if (key_impl->key_handle) {
+ BCryptDestroyKey(key_impl->key_handle);
+ }
+
+ aws_byte_buf_clean_up_secure(&key_pair->key_buf);
+ aws_mem_release(key_pair->allocator, key_impl);
+ }
+}
+
+static size_t s_signature_length(const struct aws_ecc_key_pair *key_pair) {
+ static size_t s_der_overhead = 8;
+ return s_der_overhead + aws_ecc_key_coordinate_byte_size_from_curve_name(key_pair->curve_name) * 2;
+}
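+
+/* e.g. (illustrative) for P-384 this reserves 8 + 2 * 48 = 104 bytes for the DER-encoded signature. */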
+
+static bool s_trim_zeros_predicate(uint8_t value) {
+ return value == 0;
+}
+
+static int s_sign_message(
+ const struct aws_ecc_key_pair *key_pair,
+ const struct aws_byte_cursor *message,
+ struct aws_byte_buf *signature_output) {
+ struct bcrypt_ecc_key_pair *key_impl = key_pair->impl;
+
+ size_t output_buf_space = signature_output->capacity - signature_output->len;
+
+ if (output_buf_space < s_signature_length(key_pair)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ uint8_t temp_signature[MAX_SIGNATURE_LENGTH] = {0};
+ struct aws_byte_buf temp_signature_buf = aws_byte_buf_from_empty_array(temp_signature, sizeof(temp_signature));
+ size_t signature_length = temp_signature_buf.capacity;
+
+ NTSTATUS status = BCryptSignHash(
+ key_impl->key_handle,
+ NULL,
+ message->ptr,
+ (ULONG)message->len,
+ temp_signature_buf.buffer,
+ (ULONG)signature_length,
+ (ULONG *)&signature_length,
+ 0);
+
+ if (status != 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ temp_signature_buf.len += signature_length;
+ size_t coordinate_len = temp_signature_buf.len / 2;
+
+ /* okay. Windows doesn't DER encode this to ASN.1, so we need to do it manually. */
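+    /* Target shape (illustrative): ECDSA-Sig-Value ::= SEQUENCE { r INTEGER, s INTEGER },
+       where r and s are the raw big-endian halves of the BCrypt signature output. */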
+ struct aws_der_encoder *encoder =
+ aws_der_encoder_new(key_pair->allocator, signature_output->capacity - signature_output->len);
+ if (!encoder) {
+ return AWS_OP_ERR;
+ }
+
+ aws_der_encoder_begin_sequence(encoder);
+ struct aws_byte_cursor integer_cur = aws_byte_cursor_from_array(temp_signature_buf.buffer, coordinate_len);
+ /* trim off the leading zero padding for DER encoding */
+ integer_cur = aws_byte_cursor_left_trim_pred(&integer_cur, s_trim_zeros_predicate);
+ aws_der_encoder_write_integer(encoder, integer_cur);
+ integer_cur = aws_byte_cursor_from_array(temp_signature_buf.buffer + coordinate_len, coordinate_len);
+ /* trim off the leading zero padding for DER encoding */
+ integer_cur = aws_byte_cursor_left_trim_pred(&integer_cur, s_trim_zeros_predicate);
+ aws_der_encoder_write_integer(encoder, integer_cur);
+ aws_der_encoder_end_sequence(encoder);
+
+ struct aws_byte_cursor signature_out_cur;
+ AWS_ZERO_STRUCT(signature_out_cur);
+ aws_der_encoder_get_contents(encoder, &signature_out_cur);
+ aws_byte_buf_append(signature_output, &signature_out_cur);
+ aws_der_encoder_destroy(encoder);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_derive_public_key(struct aws_ecc_key_pair *key_pair) {
+ struct bcrypt_ecc_key_pair *key_impl = key_pair->impl;
+
+ ULONG result = 0;
+ NTSTATUS status = BCryptExportKey(
+ key_impl->key_handle,
+ NULL,
+ BCRYPT_ECCPRIVATE_BLOB,
+ key_pair->key_buf.buffer,
+ (ULONG)key_pair->key_buf.capacity,
+ &result,
+ 0);
+ key_pair->key_buf.len = result;
+ (void)result;
+
+ if (status) {
+ return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_append_coordinate(
+ struct aws_byte_buf *buffer,
+ struct aws_byte_cursor *coordinate,
+ enum aws_ecc_curve_name curve_name) {
+
+ size_t coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name);
+ if (coordinate->len < coordinate_size) {
+ size_t leading_zero_count = coordinate_size - coordinate->len;
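+        /* e.g. (illustrative) a 31-byte P-256 integer from the DER decoder is
+           left-padded with one zero byte so BCrypt sees exactly 32 bytes. */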
+ AWS_FATAL_ASSERT(leading_zero_count + buffer->len <= buffer->capacity);
+
+ memset(buffer->buffer + buffer->len, 0, leading_zero_count);
+ buffer->len += leading_zero_count;
+ }
+
+ return aws_byte_buf_append(buffer, coordinate);
+}
+
+static int s_verify_signature(
+ const struct aws_ecc_key_pair *key_pair,
+ const struct aws_byte_cursor *message,
+ const struct aws_byte_cursor *signature) {
+ struct bcrypt_ecc_key_pair *key_impl = key_pair->impl;
+
+    /* OKAY, Windows doesn't do the whole standard internet formats thing, so we need to
+       manually decode the DER-encoded ASN.1 format first. */
+ uint8_t temp_signature[MAX_SIGNATURE_LENGTH] = {0};
+ struct aws_byte_buf temp_signature_buf = aws_byte_buf_from_empty_array(temp_signature, sizeof(temp_signature));
+
+ struct aws_byte_cursor der_encoded_signature = aws_byte_cursor_from_array(signature->ptr, signature->len);
+
+ struct aws_der_decoder *decoder = aws_der_decoder_new(key_pair->allocator, der_encoded_signature);
+ if (!decoder) {
+ return AWS_OP_ERR;
+ }
+
+ if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_SEQUENCE) {
+ aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED);
+ goto error;
+ }
+
+ if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_INTEGER) {
+ aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED);
+ goto error;
+ }
+
+ /* there will be two coordinates. They need to be concatenated together. */
+ struct aws_byte_cursor coordinate;
+ AWS_ZERO_STRUCT(coordinate);
+ if (aws_der_decoder_tlv_integer(decoder, &coordinate)) {
+ aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED);
+ goto error;
+ }
+
+ if (s_append_coordinate(&temp_signature_buf, &coordinate, key_pair->curve_name)) {
+ goto error;
+ }
+
+ if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_INTEGER) {
+ aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED);
+ goto error;
+ }
+ AWS_ZERO_STRUCT(coordinate);
+ if (aws_der_decoder_tlv_integer(decoder, &coordinate)) {
+ aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED);
+ goto error;
+ }
+
+ if (s_append_coordinate(&temp_signature_buf, &coordinate, key_pair->curve_name)) {
+ goto error;
+ }
+
+ aws_der_decoder_destroy(decoder);
+
+ /* okay, now we've got a windows compatible signature, let's verify it. */
+ NTSTATUS status = BCryptVerifySignature(
+ key_impl->key_handle,
+ NULL,
+ message->ptr,
+ (ULONG)message->len,
+ temp_signature_buf.buffer,
+ (ULONG)temp_signature_buf.len,
+ 0);
+
+ return status == 0 ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED);
+
+error:
+ if (decoder) {
+ aws_der_decoder_destroy(decoder);
+ }
+ return AWS_OP_ERR;
+}
+
+static struct aws_ecc_key_pair_vtable s_vtable = {
+ .destroy = s_destroy_key,
+ .derive_pub_key = s_derive_public_key,
+ .sign_message = s_sign_message,
+ .verify_signature = s_verify_signature,
+ .signature_length = s_signature_length,
+};
+
+static struct aws_ecc_key_pair *s_alloc_pair_and_init_buffers(
+ struct aws_allocator *allocator,
+ enum aws_ecc_curve_name curve_name,
+ struct aws_byte_cursor pub_x,
+ struct aws_byte_cursor pub_y,
+ struct aws_byte_cursor priv_key) {
+
+ aws_thread_call_once(&s_ecdsa_thread_once, s_load_alg_handle, NULL);
+
+ struct bcrypt_ecc_key_pair *key_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_ecc_key_pair));
+
+ if (!key_impl) {
+ return NULL;
+ }
+
+ key_impl->key_pair.allocator = allocator;
+ key_impl->key_pair.curve_name = curve_name;
+ key_impl->key_pair.impl = key_impl;
+ key_impl->key_pair.vtable = &s_vtable;
+ aws_atomic_init_int(&key_impl->key_pair.ref_count, 1);
+
+ size_t s_key_coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name);
+
+ if (!s_key_coordinate_size) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
+ }
+
+ if ((pub_x.ptr && pub_x.len != s_key_coordinate_size) || (pub_y.ptr && pub_y.len != s_key_coordinate_size) ||
+ (priv_key.ptr && priv_key.len != s_key_coordinate_size)) {
+ aws_raise_error(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM);
+ goto error;
+ }
+
+ size_t total_buffer_size = s_key_coordinate_size * 3 + sizeof(BCRYPT_ECCKEY_BLOB);
+
+ if (aws_byte_buf_init(&key_impl->key_pair.key_buf, allocator, total_buffer_size)) {
+ goto error;
+ }
+
+ aws_byte_buf_secure_zero(&key_impl->key_pair.key_buf);
+
+ BCRYPT_ECCKEY_BLOB key_blob;
+ AWS_ZERO_STRUCT(key_blob);
+ key_blob.dwMagic = s_get_magic_from_curve_name(curve_name, priv_key.ptr && priv_key.len);
+ key_blob.cbKey = (ULONG)s_key_coordinate_size;
+
+ struct aws_byte_cursor header = aws_byte_cursor_from_array(&key_blob, sizeof(key_blob));
+ aws_byte_buf_append(&key_impl->key_pair.key_buf, &header);
+
+ LPCWSTR blob_type = BCRYPT_ECCPUBLIC_BLOB;
+ ULONG flags = 0;
+ if (pub_x.ptr && pub_y.ptr) {
+ aws_byte_buf_append(&key_impl->key_pair.key_buf, &pub_x);
+ aws_byte_buf_append(&key_impl->key_pair.key_buf, &pub_y);
+ } else {
+ key_impl->key_pair.key_buf.len += s_key_coordinate_size * 2;
+ flags = BCRYPT_NO_KEY_VALIDATION;
+ }
+
+ if (priv_key.ptr) {
+ blob_type = BCRYPT_ECCPRIVATE_BLOB;
+ aws_byte_buf_append(&key_impl->key_pair.key_buf, &priv_key);
+ }
+
+ key_impl->key_pair.pub_x =
+ aws_byte_buf_from_array(key_impl->key_pair.key_buf.buffer + sizeof(key_blob), s_key_coordinate_size);
+
+ key_impl->key_pair.pub_y =
+ aws_byte_buf_from_array(key_impl->key_pair.pub_x.buffer + s_key_coordinate_size, s_key_coordinate_size);
+
+ key_impl->key_pair.priv_d =
+ aws_byte_buf_from_array(key_impl->key_pair.pub_y.buffer + s_key_coordinate_size, s_key_coordinate_size);
+
+ BCRYPT_ALG_HANDLE alg_handle = s_key_alg_handle_from_curve_name(curve_name);
+ NTSTATUS status = BCryptImportKeyPair(
+ alg_handle,
+ NULL,
+ blob_type,
+ &key_impl->key_handle,
+ key_impl->key_pair.key_buf.buffer,
+ (ULONG)key_impl->key_pair.key_buf.len,
+ flags);
+
+ if (status) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
+ }
+
+ return &key_impl->key_pair;
+
+error:
+ s_destroy_key(&key_impl->key_pair);
+ return NULL;
+}
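+
+/* key_buf layout built above (illustrative):
+ *   [BCRYPT_ECCKEY_BLOB][pub_x][pub_y][priv_d]
+ * with each coordinate exactly cbKey bytes (32 for P-256, 48 for P-384). */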
+
+struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl(
+ struct aws_allocator *allocator,
+ enum aws_ecc_curve_name curve_name,
+ const struct aws_byte_cursor *priv_key) {
+
+ struct aws_byte_cursor empty;
+ AWS_ZERO_STRUCT(empty);
+ return s_alloc_pair_and_init_buffers(allocator, curve_name, empty, empty, *priv_key);
+}
+
+struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key_impl(
+ struct aws_allocator *allocator,
+ enum aws_ecc_curve_name curve_name,
+ const struct aws_byte_cursor *public_key_x,
+ const struct aws_byte_cursor *public_key_y) {
+
+ struct aws_byte_cursor empty;
+ AWS_ZERO_STRUCT(empty);
+ return s_alloc_pair_and_init_buffers(allocator, curve_name, *public_key_x, *public_key_y, empty);
+}
+
+struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random(
+ struct aws_allocator *allocator,
+ enum aws_ecc_curve_name curve_name) {
+ aws_thread_call_once(&s_ecdsa_thread_once, s_load_alg_handle, NULL);
+
+ struct bcrypt_ecc_key_pair *key_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_ecc_key_pair));
+
+ if (!key_impl) {
+ return NULL;
+ }
+
+ key_impl->key_pair.allocator = allocator;
+ key_impl->key_pair.curve_name = curve_name;
+ key_impl->key_pair.impl = key_impl;
+ key_impl->key_pair.vtable = &s_vtable;
+ aws_atomic_init_int(&key_impl->key_pair.ref_count, 1);
+
+ size_t key_coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name);
+
+ if (!key_coordinate_size) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
+ }
+
+ BCRYPT_ALG_HANDLE alg_handle = s_key_alg_handle_from_curve_name(curve_name);
+
+ ULONG key_bit_length = (ULONG)key_coordinate_size * 8;
+ NTSTATUS status = BCryptGenerateKeyPair(alg_handle, &key_impl->key_handle, key_bit_length, 0);
+
+ if (status) {
+ aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
+ goto error;
+ }
+
+ status = BCryptFinalizeKeyPair(key_impl->key_handle, 0);
+
+ if (status) {
+ aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
+ goto error;
+ }
+
+ size_t total_buffer_size = key_coordinate_size * 3 + sizeof(BCRYPT_ECCKEY_BLOB);
+
+ if (aws_byte_buf_init(&key_impl->key_pair.key_buf, allocator, total_buffer_size)) {
+ goto error;
+ }
+
+ aws_byte_buf_secure_zero(&key_impl->key_pair.key_buf);
+
+ key_impl->key_pair.pub_x =
+ aws_byte_buf_from_array(key_impl->key_pair.key_buf.buffer + sizeof(BCRYPT_ECCKEY_BLOB), key_coordinate_size);
+
+ key_impl->key_pair.pub_y =
+ aws_byte_buf_from_array(key_impl->key_pair.pub_x.buffer + key_coordinate_size, key_coordinate_size);
+
+ key_impl->key_pair.priv_d =
+ aws_byte_buf_from_array(key_impl->key_pair.pub_y.buffer + key_coordinate_size, key_coordinate_size);
+
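+    /* Export the freshly generated public point out of CNG so the views above hold real data. */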
+ if (s_derive_public_key(&key_impl->key_pair)) {
+ goto error;
+ }
+
+ return &key_impl->key_pair;
+
+error:
+ s_destroy_key(&key_impl->key_pair);
+ return NULL;
+}
+
+struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *encoded_keys) {
+    struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, *encoded_keys);
+
+    if (!decoder) {
+        return NULL;
+    }
+
+    /* we could have a private key, a public key, or a full key pair. */
+ struct aws_byte_cursor pub_x;
+ AWS_ZERO_STRUCT(pub_x);
+ struct aws_byte_cursor pub_y;
+ AWS_ZERO_STRUCT(pub_y);
+ struct aws_byte_cursor priv_d;
+ AWS_ZERO_STRUCT(priv_d);
+
+ enum aws_ecc_curve_name curve_name;
+ if (aws_der_decoder_load_ecc_key_pair(decoder, &pub_x, &pub_y, &priv_d, &curve_name)) {
+ goto error;
+ }
+
+ /* now that we have the buffers, we can just use the normal code path. */
+ struct aws_ecc_key_pair *key_pair = s_alloc_pair_and_init_buffers(allocator, curve_name, pub_x, pub_y, priv_d);
+ aws_der_decoder_destroy(decoder);
+
+ return key_pair;
+error:
+ if (decoder) {
+ aws_der_decoder_destroy(decoder);
+ }
+ return NULL;
+}
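A minimal usage sketch of the ECC entry points added above, reached through the
public aws-c-cal API (<aws/cal/cal.h>, <aws/cal/ecc.h>). The function name
s_ecc_sign_sketch and the abbreviated error handling are illustrative, not part
of the patch.

    #include <aws/cal/cal.h>
    #include <aws/cal/ecc.h>

    static int s_ecc_sign_sketch(struct aws_allocator *allocator) {
        aws_cal_library_init(allocator);

        /* On Windows this lands in aws_ecc_key_pair_new_generate_random above. */
        struct aws_ecc_key_pair *key = aws_ecc_key_pair_new_generate_random(allocator, AWS_CAL_ECDSA_P256);
        if (!key) {
            return AWS_OP_ERR;
        }

        /* aws_ecdsa_sign_message expects a digest, not the raw message. */
        uint8_t digest[32] = {0};
        struct aws_byte_cursor digest_cur = aws_byte_cursor_from_array(digest, sizeof(digest));

        struct aws_byte_buf signature;
        if (aws_byte_buf_init(&signature, allocator, aws_ecc_key_pair_signature_length(key))) {
            aws_ecc_key_pair_release(key);
            return AWS_OP_ERR;
        }

        int result = aws_ecdsa_sign_message(key, &digest_cur, &signature);
        if (result == AWS_OP_SUCCESS) {
            struct aws_byte_cursor sig_cur = aws_byte_cursor_from_buf(&signature);
            result = aws_ecdsa_verify_signature(key, &digest_cur, &sig_cur);
        }

        aws_byte_buf_clean_up(&signature);
        aws_ecc_key_pair_release(key);
        aws_cal_library_clean_up();
        return result;
    }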
diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_hash.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_hash.c
new file mode 100644
index 0000000000..b4b93f91b0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_hash.c
@@ -0,0 +1,220 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/hash.h>
+#include <aws/common/thread.h>
+
+#include <windows.h>
+
+#include <bcrypt.h>
+#include <winerror.h>
+
+static BCRYPT_ALG_HANDLE s_sha256_alg = NULL;
+static size_t s_sha256_obj_len = 0;
+static aws_thread_once s_sha256_once = AWS_THREAD_ONCE_STATIC_INIT;
+
+static BCRYPT_ALG_HANDLE s_sha1_alg = NULL;
+static size_t s_sha1_obj_len = 0;
+static aws_thread_once s_sha1_once = AWS_THREAD_ONCE_STATIC_INIT;
+
+static BCRYPT_ALG_HANDLE s_md5_alg = NULL;
+static size_t s_md5_obj_len = 0;
+static aws_thread_once s_md5_once = AWS_THREAD_ONCE_STATIC_INIT;
+
+static void s_destroy(struct aws_hash *hash);
+static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash);
+static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output);
+
+static struct aws_hash_vtable s_sha256_vtable = {
+ .destroy = s_destroy,
+ .update = s_update,
+ .finalize = s_finalize,
+ .alg_name = "SHA256",
+ .provider = "Windows CNG",
+};
+
+static struct aws_hash_vtable s_sha1_vtable = {
+ .destroy = s_destroy,
+ .update = s_update,
+ .finalize = s_finalize,
+ .alg_name = "SHA1",
+ .provider = "Windows CNG",
+};
+
+static struct aws_hash_vtable s_md5_vtable = {
+ .destroy = s_destroy,
+ .update = s_update,
+ .finalize = s_finalize,
+ .alg_name = "MD5",
+ .provider = "Windows CNG",
+};
+
+struct bcrypt_hash_handle {
+ struct aws_hash hash;
+ BCRYPT_HASH_HANDLE hash_handle;
+ uint8_t *hash_obj;
+};
+
+static void s_load_sha256_alg_handle(void *user_data) {
+ (void)user_data;
+    /* BCryptOpenAlgorithmProvider is incredibly slow, so open the handle once and deliberately leak it. */
+ (void)BCryptOpenAlgorithmProvider(&s_sha256_alg, BCRYPT_SHA256_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0);
+ AWS_ASSERT(s_sha256_alg);
+ DWORD result_length = 0;
+ (void)BCryptGetProperty(
+ s_sha256_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_sha256_obj_len, sizeof(s_sha256_obj_len), &result_length, 0);
+}
+
+static void s_load_sha1_alg_handle(void *user_data) {
+ (void)user_data;
+    /* BCryptOpenAlgorithmProvider is incredibly slow, so open the handle once and deliberately leak it. */
+ (void)BCryptOpenAlgorithmProvider(&s_sha1_alg, BCRYPT_SHA1_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0);
+ AWS_ASSERT(s_sha1_alg);
+ DWORD result_length = 0;
+ (void)BCryptGetProperty(
+ s_sha1_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_sha1_obj_len, sizeof(s_sha1_obj_len), &result_length, 0);
+}
+
+static void s_load_md5_alg_handle(void *user_data) {
+ (void)user_data;
+    /* BCryptOpenAlgorithmProvider is incredibly slow, so open the handle once and deliberately leak it. */
+ (void)BCryptOpenAlgorithmProvider(&s_md5_alg, BCRYPT_MD5_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0);
+ AWS_ASSERT(s_md5_alg);
+ DWORD result_length = 0;
+ (void)BCryptGetProperty(
+ s_md5_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_md5_obj_len, sizeof(s_md5_obj_len), &result_length, 0);
+}
+
+struct aws_hash *aws_sha256_default_new(struct aws_allocator *allocator) {
+ aws_thread_call_once(&s_sha256_once, s_load_sha256_alg_handle, NULL);
+
+ struct bcrypt_hash_handle *bcrypt_hash = NULL;
+ uint8_t *hash_obj = NULL;
+ aws_mem_acquire_many(allocator, 2, &bcrypt_hash, sizeof(struct bcrypt_hash_handle), &hash_obj, s_sha256_obj_len);
+
+ if (!bcrypt_hash) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*bcrypt_hash);
+ bcrypt_hash->hash.allocator = allocator;
+ bcrypt_hash->hash.vtable = &s_sha256_vtable;
+ bcrypt_hash->hash.impl = bcrypt_hash;
+ bcrypt_hash->hash.digest_size = AWS_SHA256_LEN;
+ bcrypt_hash->hash.good = true;
+ bcrypt_hash->hash_obj = hash_obj;
+ NTSTATUS status = BCryptCreateHash(
+ s_sha256_alg, &bcrypt_hash->hash_handle, bcrypt_hash->hash_obj, (ULONG)s_sha256_obj_len, NULL, 0, 0);
+
+ if (((NTSTATUS)status) < 0) {
+ aws_mem_release(allocator, bcrypt_hash);
+ return NULL;
+ }
+
+ return &bcrypt_hash->hash;
+}
+
+struct aws_hash *aws_sha1_default_new(struct aws_allocator *allocator) {
+ aws_thread_call_once(&s_sha1_once, s_load_sha1_alg_handle, NULL);
+
+ struct bcrypt_hash_handle *bcrypt_hash = NULL;
+ uint8_t *hash_obj = NULL;
+ aws_mem_acquire_many(allocator, 2, &bcrypt_hash, sizeof(struct bcrypt_hash_handle), &hash_obj, s_sha1_obj_len);
+
+ if (!bcrypt_hash) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*bcrypt_hash);
+ bcrypt_hash->hash.allocator = allocator;
+ bcrypt_hash->hash.vtable = &s_sha1_vtable;
+ bcrypt_hash->hash.impl = bcrypt_hash;
+ bcrypt_hash->hash.digest_size = AWS_SHA1_LEN;
+ bcrypt_hash->hash.good = true;
+ bcrypt_hash->hash_obj = hash_obj;
+ NTSTATUS status = BCryptCreateHash(
+ s_sha1_alg, &bcrypt_hash->hash_handle, bcrypt_hash->hash_obj, (ULONG)s_sha1_obj_len, NULL, 0, 0);
+
+ if (((NTSTATUS)status) < 0) {
+ aws_mem_release(allocator, bcrypt_hash);
+ return NULL;
+ }
+
+ return &bcrypt_hash->hash;
+}
+
+struct aws_hash *aws_md5_default_new(struct aws_allocator *allocator) {
+ aws_thread_call_once(&s_md5_once, s_load_md5_alg_handle, NULL);
+
+ struct bcrypt_hash_handle *bcrypt_hash = NULL;
+ uint8_t *hash_obj = NULL;
+ aws_mem_acquire_many(allocator, 2, &bcrypt_hash, sizeof(struct bcrypt_hash_handle), &hash_obj, s_md5_obj_len);
+
+ if (!bcrypt_hash) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*bcrypt_hash);
+ bcrypt_hash->hash.allocator = allocator;
+ bcrypt_hash->hash.vtable = &s_md5_vtable;
+ bcrypt_hash->hash.impl = bcrypt_hash;
+ bcrypt_hash->hash.digest_size = AWS_MD5_LEN;
+ bcrypt_hash->hash.good = true;
+ bcrypt_hash->hash_obj = hash_obj;
+ NTSTATUS status =
+ BCryptCreateHash(s_md5_alg, &bcrypt_hash->hash_handle, bcrypt_hash->hash_obj, (ULONG)s_md5_obj_len, NULL, 0, 0);
+
+ if (((NTSTATUS)status) < 0) {
+ aws_mem_release(allocator, bcrypt_hash);
+ return NULL;
+ }
+
+ return &bcrypt_hash->hash;
+}
+
+static void s_destroy(struct aws_hash *hash) {
+ struct bcrypt_hash_handle *ctx = hash->impl;
+ BCryptDestroyHash(ctx->hash_handle);
+ aws_mem_release(hash->allocator, ctx);
+}
+
+static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash) {
+ if (!hash->good) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct bcrypt_hash_handle *ctx = hash->impl;
+ NTSTATUS status = BCryptHashData(ctx->hash_handle, to_hash->ptr, (ULONG)to_hash->len, 0);
+
+ if (((NTSTATUS)status) < 0) {
+ hash->good = false;
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output) {
+ if (!hash->good) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct bcrypt_hash_handle *ctx = hash->impl;
+
+ size_t buffer_len = output->capacity - output->len;
+
+ if (buffer_len < hash->digest_size) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ NTSTATUS status = BCryptFinishHash(ctx->hash_handle, output->buffer + output->len, (ULONG)hash->digest_size, 0);
+
+ hash->good = false;
+ if (((NTSTATUS)status) < 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ output->len += hash->digest_size;
+ return AWS_OP_SUCCESS;
+}
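A short sketch of how the hash vtables above are driven through the public
streaming API in <aws/cal/hash.h>. The name s_sha256_sketch is illustrative,
and aws_cal_library_init is assumed to have run already.

    #include <aws/cal/hash.h>

    static int s_sha256_sketch(struct aws_allocator *allocator, struct aws_byte_cursor data) {
        /* aws_sha256_new normally dispatches to aws_sha256_default_new on Windows. */
        struct aws_hash *hash = aws_sha256_new(allocator);
        if (!hash) {
            return AWS_OP_ERR;
        }

        uint8_t digest[AWS_SHA256_LEN];
        struct aws_byte_buf out = aws_byte_buf_from_empty_array(digest, sizeof(digest));

        int result = aws_hash_update(hash, &data);
        if (result == AWS_OP_SUCCESS) {
            result = aws_hash_finalize(hash, &out, 0); /* 0 = emit the full digest */
        }

        aws_hash_destroy(hash); /* invokes s_destroy above */
        return result;
    }

Note that finalize is one-shot by design: s_finalize clears hash->good, so a
finished hash object cannot be updated or finalized again.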
diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_hmac.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_hmac.c
new file mode 100644
index 0000000000..3bf4fa061a
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_hmac.c
@@ -0,0 +1,132 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/hmac.h>
+#include <aws/common/thread.h>
+
+#include <windows.h>
+
+#include <bcrypt.h>
+#include <winerror.h>
+
+static BCRYPT_ALG_HANDLE s_sha256_hmac_alg = NULL;
+static size_t s_sha256_hmac_obj_len = 0;
+
+static aws_thread_once s_sha256_hmac_once = AWS_THREAD_ONCE_STATIC_INIT;
+
+static void s_destroy(struct aws_hmac *hmac);
+static int s_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hash);
+static int s_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output);
+
+static struct aws_hmac_vtable s_sha256_hmac_vtable = {
+ .destroy = s_destroy,
+ .update = s_update,
+ .finalize = s_finalize,
+ .alg_name = "SHA256 HMAC",
+ .provider = "Windows CNG",
+};
+
+struct bcrypt_hmac_handle {
+ struct aws_hmac hmac;
+ BCRYPT_HASH_HANDLE hash_handle;
+ uint8_t *hash_obj;
+};
+
+static void s_load_alg_handle(void *user_data) {
+ (void)user_data;
+    /* BCryptOpenAlgorithmProvider is incredibly slow, so open the handle once and deliberately leak it. */
+    (void)BCryptOpenAlgorithmProvider(
+        &s_sha256_hmac_alg, BCRYPT_SHA256_ALGORITHM, MS_PRIMITIVE_PROVIDER, BCRYPT_ALG_HANDLE_HMAC_FLAG);
+ AWS_ASSERT(s_sha256_hmac_alg);
+ DWORD result_length = 0;
+    (void)BCryptGetProperty(
+ s_sha256_hmac_alg,
+ BCRYPT_OBJECT_LENGTH,
+ (PBYTE)&s_sha256_hmac_obj_len,
+ sizeof(s_sha256_hmac_obj_len),
+ &result_length,
+ 0);
+}
+
+struct aws_hmac *aws_sha256_hmac_default_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret) {
+ aws_thread_call_once(&s_sha256_hmac_once, s_load_alg_handle, NULL);
+
+    struct bcrypt_hmac_handle *bcrypt_hmac = NULL;
+    uint8_t *hash_obj = NULL;
+ aws_mem_acquire_many(
+ allocator, 2, &bcrypt_hmac, sizeof(struct bcrypt_hmac_handle), &hash_obj, s_sha256_hmac_obj_len);
+
+ if (!bcrypt_hmac) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*bcrypt_hmac);
+ bcrypt_hmac->hmac.allocator = allocator;
+ bcrypt_hmac->hmac.vtable = &s_sha256_hmac_vtable;
+ bcrypt_hmac->hmac.impl = bcrypt_hmac;
+ bcrypt_hmac->hmac.digest_size = AWS_SHA256_HMAC_LEN;
+ bcrypt_hmac->hmac.good = true;
+ bcrypt_hmac->hash_obj = hash_obj;
+ NTSTATUS status = BCryptCreateHash(
+ s_sha256_hmac_alg,
+ &bcrypt_hmac->hash_handle,
+ bcrypt_hmac->hash_obj,
+ (ULONG)s_sha256_hmac_obj_len,
+ secret->ptr,
+ (ULONG)secret->len,
+ 0);
+
+ if (((NTSTATUS)status) < 0) {
+ aws_mem_release(allocator, bcrypt_hmac);
+ return NULL;
+ }
+
+ return &bcrypt_hmac->hmac;
+}
+
+static void s_destroy(struct aws_hmac *hmac) {
+ struct bcrypt_hmac_handle *ctx = hmac->impl;
+ BCryptDestroyHash(ctx->hash_handle);
+ aws_mem_release(hmac->allocator, ctx);
+}
+
+static int s_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hash) {
+ if (!hmac->good) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct bcrypt_hmac_handle *ctx = hmac->impl;
+ NTSTATUS status = BCryptHashData(ctx->hash_handle, to_hash->ptr, (ULONG)to_hash->len, 0);
+
+ if (((NTSTATUS)status) < 0) {
+ hmac->good = false;
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output) {
+ if (!hmac->good) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct bcrypt_hmac_handle *ctx = hmac->impl;
+
+ size_t buffer_len = output->capacity - output->len;
+
+ if (buffer_len < hmac->digest_size) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ NTSTATUS status = BCryptFinishHash(ctx->hash_handle, output->buffer + output->len, (ULONG)hmac->digest_size, 0);
+
+ hmac->good = false;
+ if (((NTSTATUS)status) < 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ output->len += hmac->digest_size;
+ return AWS_OP_SUCCESS;
+}
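The HMAC path mirrors the hash path, keyed by the secret passed at creation.
A sketch via <aws/cal/hmac.h>; s_hmac_sketch is an illustrative name, not part
of the patch.

    #include <aws/cal/hmac.h>

    static int s_hmac_sketch(
        struct aws_allocator *allocator,
        struct aws_byte_cursor secret,
        struct aws_byte_cursor data) {

        /* aws_sha256_hmac_new normally dispatches to aws_sha256_hmac_default_new on Windows. */
        struct aws_hmac *hmac = aws_sha256_hmac_new(allocator, &secret);
        if (!hmac) {
            return AWS_OP_ERR;
        }

        uint8_t mac[AWS_SHA256_HMAC_LEN];
        struct aws_byte_buf out = aws_byte_buf_from_empty_array(mac, sizeof(mac));

        int result = aws_hmac_update(hmac, &data);
        if (result == AWS_OP_SUCCESS) {
            result = aws_hmac_finalize(hmac, &out, 0); /* 0 = full-length MAC */
        }

        aws_hmac_destroy(hmac);
        return result;
    }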
diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c
new file mode 100644
index 0000000000..decedcdafa
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c
@@ -0,0 +1,12 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/allocator.h>
+
+void aws_cal_platform_init(struct aws_allocator *allocator) {
+ (void)allocator;
+}
+
+void aws_cal_platform_clean_up(void) {}
diff --git a/contrib/restricted/aws/aws-c-cal/ya.make b/contrib/restricted/aws/aws-c-cal/ya.make
index 87f658b414..dafca392e2 100644
--- a/contrib/restricted/aws/aws-c-cal/ya.make
+++ b/contrib/restricted/aws/aws-c-cal/ya.make
@@ -32,6 +32,12 @@ CFLAGS(
-DHAVE_SYSCONF
)
+IF (OS_WINDOWS)
+ CFLAGS(
+ -DAWS_CAL_EXPORTS
+ )
+ENDIF()
+
SRCS(
source/cal.c
source/der.c
@@ -64,6 +70,14 @@ ELSEIF (OS_LINUX)
source/unix/opensslcrypto_hash.c
source/unix/opensslcrypto_hmac.c
)
+ELSEIF (OS_WINDOWS)
+ SRCS(
+ source/windows/bcrypt_aes.c
+ source/windows/bcrypt_ecc.c
+ source/windows/bcrypt_hash.c
+ source/windows/bcrypt_hmac.c
+ source/windows/bcrypt_platform_init.c
+ )
ENDIF()
END()