aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/restricted/aws
diff options
context:
space:
mode:
authorrobot-contrib <robot-contrib@yandex-team.com>2023-05-10 07:41:00 +0300
committerrobot-contrib <robot-contrib@yandex-team.com>2023-05-10 07:41:00 +0300
commit4364c710590a1787a5cc4b0d5d06632ea0549c56 (patch)
treec7a7ec0bd265e7f85d1b68dd96518f0bf996c77d /contrib/restricted/aws
parent3b8bff8325778a1cebee3d2817de4cabec8a1635 (diff)
downloadydb-4364c710590a1787a5cc4b0d5d06632ea0549c56.tar.gz
Update contrib/restricted/aws/aws-c-cal to 0.5.26
Diffstat (limited to 'contrib/restricted/aws')
-rw-r--r--contrib/restricted/aws/aws-c-cal/CMakeLists.darwin-x86_64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-cal/CMakeLists.linux-aarch64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-cal/CMakeLists.linux-x86_64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-cal/CMakeLists.windows-x86_64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h3
-rw-r--r--contrib/restricted/aws/aws-c-cal/include/aws/cal/private/symmetric_cipher_priv.h59
-rw-r--r--contrib/restricted/aws/aws-c-cal/include/aws/cal/symmetric_cipher.h238
-rw-r--r--contrib/restricted/aws/aws-c-cal/source/cal.c7
-rw-r--r--contrib/restricted/aws/aws-c-cal/source/symmetric_cipher.c256
-rw-r--r--contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c709
10 files changed, 1279 insertions, 1 deletions
diff --git a/contrib/restricted/aws/aws-c-cal/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-cal/CMakeLists.darwin-x86_64.txt
index c28381a676..e7abf5339f 100644
--- a/contrib/restricted/aws/aws-c-cal/CMakeLists.darwin-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-cal/CMakeLists.darwin-x86_64.txt
@@ -28,6 +28,8 @@ target_sources(restricted-aws-aws-c-cal PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/ecc.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/hash.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/hmac.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/symmetric_cipher.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hash.c
diff --git a/contrib/restricted/aws/aws-c-cal/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-cal/CMakeLists.linux-aarch64.txt
index d729ec396a..61d0d6b59d 100644
--- a/contrib/restricted/aws/aws-c-cal/CMakeLists.linux-aarch64.txt
+++ b/contrib/restricted/aws/aws-c-cal/CMakeLists.linux-aarch64.txt
@@ -29,6 +29,8 @@ target_sources(restricted-aws-aws-c-cal PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/ecc.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/hash.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/hmac.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/symmetric_cipher.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hash.c
diff --git a/contrib/restricted/aws/aws-c-cal/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-cal/CMakeLists.linux-x86_64.txt
index d729ec396a..61d0d6b59d 100644
--- a/contrib/restricted/aws/aws-c-cal/CMakeLists.linux-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-cal/CMakeLists.linux-x86_64.txt
@@ -29,6 +29,8 @@ target_sources(restricted-aws-aws-c-cal PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/ecc.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/hash.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/hmac.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/symmetric_cipher.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hash.c
diff --git a/contrib/restricted/aws/aws-c-cal/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-cal/CMakeLists.windows-x86_64.txt
index c28381a676..e7abf5339f 100644
--- a/contrib/restricted/aws/aws-c-cal/CMakeLists.windows-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-cal/CMakeLists.windows-x86_64.txt
@@ -28,6 +28,8 @@ target_sources(restricted-aws-aws-c-cal PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/ecc.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/hash.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/hmac.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/symmetric_cipher.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hash.c
diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h
index 5456c91900..2c6c909838 100644
--- a/contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h
+++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h
@@ -22,7 +22,8 @@ enum aws_cal_errors {
AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED,
AWS_ERROR_CAL_MISMATCHED_DER_TYPE,
AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM,
-
+ AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM,
+ AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM,
AWS_ERROR_CAL_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_CAL_PACKAGE_ID)
};
diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/symmetric_cipher_priv.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/symmetric_cipher_priv.h
new file mode 100644
index 0000000000..e8226d7392
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/symmetric_cipher_priv.h
@@ -0,0 +1,59 @@
+#ifndef AWS_CAL_SYMMETRIC_CIPHER_PRIV_H
+#define AWS_CAL_SYMMETRIC_CIPHER_PRIV_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/symmetric_cipher.h>
+
+struct aws_symmetric_cipher;
+
+struct aws_symmetric_cipher_vtable {
+ const char *alg_name;
+ const char *provider;
+ void (*destroy)(struct aws_symmetric_cipher *cipher);
+ /* reset the cipher to being able to start another encrypt or decrypt operation.
+ The original IV, Key, Tag etc... will be restored to the current cipher. */
+ int (*reset)(struct aws_symmetric_cipher *cipher);
+ int (*encrypt)(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out);
+ int (*decrypt)(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out);
+
+ int (*finalize_encryption)(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out);
+ int (*finalize_decryption)(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out);
+};
+
+struct aws_symmetric_cipher {
+ struct aws_allocator *allocator;
+ struct aws_symmetric_cipher_vtable *vtable;
+ struct aws_byte_buf iv;
+ struct aws_byte_buf key;
+ struct aws_byte_buf aad;
+ struct aws_byte_buf tag;
+ size_t block_size;
+ size_t key_length_bits;
+ bool good;
+ void *impl;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Generates a secure random initialization vector of length len_bytes. If is_counter_mode is set, the final 4 bytes
+ * will be reserved as a counter and initialized to 1 in big-endian byte-order.
+ */
+AWS_CAL_API void aws_symmetric_cipher_generate_initialization_vector(
+ size_t len_bytes,
+ bool is_counter_mode,
+ struct aws_byte_buf *out);
+
+/**
+ * Generates a secure random symmetric key of length len_bytes.
+ */
+AWS_CAL_API void aws_symmetric_cipher_generate_key(size_t len_bytes, struct aws_byte_buf *out);
+
+AWS_EXTERN_C_END
+
+/* Don't let this one get exported as it should never be used outside of this library (including tests). */
+int aws_symmetric_cipher_try_ensure_sufficient_buffer_space(struct aws_byte_buf *buf, size_t size);
+
+#endif /* AWS_CAL_SYMMETRIC_CIPHER_PRIV_H */
diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/symmetric_cipher.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/symmetric_cipher.h
new file mode 100644
index 0000000000..59f44831d8
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/symmetric_cipher.h
@@ -0,0 +1,238 @@
+#ifndef AWS_CAL_SYMMETRIC_CIPHER_H
+#define AWS_CAL_SYMMETRIC_CIPHER_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/cal.h>
+#include <aws/common/byte_buf.h>
+
+#define AWS_AES_256_CIPHER_BLOCK_SIZE 16
+#define AWS_AES_256_KEY_BIT_LEN 256
+#define AWS_AES_256_KEY_BYTE_LEN (AWS_AES_256_KEY_BIT_LEN / 8)
+
+struct aws_symmetric_cipher;
+
+typedef struct aws_symmetric_cipher *(aws_aes_cbc_256_new_fn)(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv);
+
+typedef struct aws_symmetric_cipher *(aws_aes_ctr_256_new_fn)(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv);
+
+typedef struct aws_symmetric_cipher *(aws_aes_gcm_256_new_fn)(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv,
+ const struct aws_byte_cursor *aad,
+ const struct aws_byte_cursor *decryption_tag);
+
+typedef struct aws_symmetric_cipher *(
+ aws_aes_keywrap_256_new_fn)(struct aws_allocator *allocator, const struct aws_byte_cursor *key);
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates an instance of AES CBC with 256-bit key.
+ * If key and iv are NULL, they will be generated internally.
+ * You can get the generated key and iv back by calling:
+ *
+ * aws_symmetric_cipher_get_key() and
+ * aws_symmetric_cipher_get_initialization_vector()
+ *
+ * respectively.
+ *
+ * If they are set, that key and iv will be copied internally and used by the cipher.
+ *
+ * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause.
+ */
+AWS_CAL_API struct aws_symmetric_cipher *aws_aes_cbc_256_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv);
+
+/**
+ * Creates an instance of AES CTR with 256-bit key.
+ * If key and iv are NULL, they will be generated internally.
+ * You can get the generated key and iv back by calling:
+ *
+ * aws_symmetric_cipher_get_key() and
+ * aws_symmetric_cipher_get_initialization_vector()
+ *
+ * respectively.
+ *
+ * If they are set, that key and iv will be copied internally and used by the cipher.
+ *
+ * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause.
+ */
+AWS_CAL_API struct aws_symmetric_cipher *aws_aes_ctr_256_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv);
+
+/**
+ * Creates an instance of AES GCM with 256-bit key.
+ * If key, iv are NULL, they will be generated internally.
+ * You can get the generated key and iv back by calling:
+ *
+ * aws_symmetric_cipher_get_key() and
+ * aws_symmetric_cipher_get_initialization_vector()
+ *
+ * respectively.
+ *
+ * If they are set, that key and iv will be copied internally and used by the cipher.
+ *
+ * If tag and aad are set they will be copied internally and used by the cipher.
+ * decryption_tag would most likely be used for a decrypt operation to detect tampering or corruption.
+ * The Tag for the most recent encrypt operation will be available in:
+ *
+ * aws_symmetric_cipher_get_tag()
+ *
+ * If aad is set it will be copied and applied to the cipher.
+ *
+ * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause.
+ */
+AWS_CAL_API struct aws_symmetric_cipher *aws_aes_gcm_256_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv,
+ const struct aws_byte_cursor *aad,
+ const struct aws_byte_cursor *decryption_tag);
+
+/**
+ * Creates an instance of AES Keywrap with 256-bit key.
+ * If key is NULL, it will be generated internally.
+ * You can get the generated key back by calling:
+ *
+ * aws_symmetric_cipher_get_key()
+ *
+ * If key is set, that key will be copied internally and used by the cipher.
+ *
+ * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause.
+ */
+AWS_CAL_API struct aws_symmetric_cipher *aws_aes_keywrap_256_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key);
+
+/**
+ * Cleans up internal resources and state for cipher and then deallocates it.
+ */
+AWS_CAL_API void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher);
+
+/**
+ * Encrypts the value in to_encrypt and writes the encrypted data into out.
+ * If out is dynamic it will be expanded. If it is not, and out is not large enough to handle
+ * the encrypted output, the call will fail. If you're trying to optimize to use a stack based array
+ * or something, make sure it's at least as large as the size of to_encrypt + an extra BLOCK to account for
+ * padding etc...
+ *
+ * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns
+ * AWS_OP_ERR;
+ */
+AWS_CAL_API int aws_symmetric_cipher_encrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_encrypt,
+ struct aws_byte_buf *out);
+
+/**
+ * Decrypts the value in to_decrypt and writes the decrypted data into out.
+ * If out is dynamic it will be expanded. If it is not, and out is not large enough to handle
+ * the decrypted output, the call will fail. If you're trying to optimize to use a stack based array
+ * or something, make sure it's at least as large as the size of to_decrypt + an extra BLOCK to account for
+ * padding etc...
+ *
+ * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns
+ * AWS_OP_ERR;
+ */
+AWS_CAL_API int aws_symmetric_cipher_decrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_decrypt,
+ struct aws_byte_buf *out);
+
+/**
+ * Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any
+ * writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and
+ * out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize
+ * to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for
+ * padding etc...
+ *
+ * After invoking this function, you MUST call aws_symmetric_cipher_reset() before invoking any encrypt/decrypt
+ * operations on this cipher again.
+ *
+ * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns
+ * AWS_OP_ERR;
+ */
+AWS_CAL_API int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out);
+
+/**
+ * Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any
+ * writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and
+ * out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize
+ * to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for
+ * padding etc...
+ *
+ * After invoking this function, you MUST call aws_symmetric_cipher_reset() before invoking any encrypt/decrypt
+ * operations on this cipher again.
+ *
+ * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns
+ * AWS_OP_ERR;
+ */
+AWS_CAL_API int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out);
+
+/**
+ * Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the
+ * same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials setup for
+ * immediate reuse.
+ *
+ * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns
+ * AWS_OP_ERR;
+ */
+AWS_CAL_API int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher);
+
+/**
+ * Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor.
+ * The memory in this cursor is unsafe as it refers to the internal buffer.
+ * This was done because the use case doesn't require fetching these during an
+ * encryption or decryption operation and it dramatically simplifies the API.
+ * Only use this function between other calls to this API as any function call can alter the value of this tag.
+ *
+ * If you need to access it in a different pattern, copy the values to your own buffer first.
+ */
+AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher);
+
+/**
+ * Gets the original initialization vector as a cursor.
+ * The memory in this cursor is unsafe as it refers to the internal buffer.
+ * This was done because the use case doesn't require fetching these during an
+ * encryption or decryption operation and it dramatically simplifies the API.
+ *
+ * Unlike some other fields, this value does not change after the initial construction of the cipher.
+ *
+ * For some algorithms, such as AES Keywrap, this will return an empty cursor.
+ */
+AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector(
+ const struct aws_symmetric_cipher *cipher);
+
+/**
+ * Gets the original key.
+ *
+ * The memory in this cursor is unsafe as it refers to the internal buffer.
+ * This was done because the use case doesn't require fetching these during an
+ * encryption or decryption operation and it dramatically simplifies the API.
+ *
+ * Unlike some other fields, this value does not change after the initial construction of the cipher.
+ */
+AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher);
+
+/**
+ * Returns true if the state of the cipher is good, and otherwise returns false.
+ * Most operations, other than aws_symmetric_cipher_reset() will fail if this function is returning false.
+ * aws_symmetric_cipher_reset() will reset the state to a good state if possible.
+ */
+AWS_CAL_API bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher);
+
+AWS_EXTERN_C_END
+#endif /* AWS_CAL_SYMMETRIC_CIPHER_H */
diff --git a/contrib/restricted/aws/aws-c-cal/source/cal.c b/contrib/restricted/aws/aws-c-cal/source/cal.c
index e793035cb4..13477c8dd3 100644
--- a/contrib/restricted/aws/aws-c-cal/source/cal.c
+++ b/contrib/restricted/aws/aws-c-cal/source/cal.c
@@ -33,6 +33,13 @@ static struct aws_error_info s_errors[] = {
AWS_DEFINE_ERROR_INFO_CAL(
AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM,
"The specified algorithim is unsupported on this platform."),
+ AWS_DEFINE_ERROR_INFO_CAL(
+ AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM,
+ "The input passed to a cipher algorithm was too large for that algorithm. Consider breaking the input into "
+ "smaller chunks."),
+ AWS_DEFINE_ERROR_INFO_CAL(
+ AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM,
+ "A cipher material such as an initialization vector or tag was an incorrect size for the selected algorithm."),
};
static struct aws_error_info_list s_list = {
diff --git a/contrib/restricted/aws/aws-c-cal/source/symmetric_cipher.c b/contrib/restricted/aws/aws-c-cal/source/symmetric_cipher.c
new file mode 100644
index 0000000000..d4c0dbed93
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/source/symmetric_cipher.c
@@ -0,0 +1,256 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/private/symmetric_cipher_priv.h>
+#include <aws/cal/symmetric_cipher.h>
+#include <aws/common/device_random.h>
+
+#ifndef BYO_CRYPTO
+
+extern struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv);
+
+extern struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv);
+
+extern struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv,
+ const struct aws_byte_cursor *aad,
+ const struct aws_byte_cursor *decryption_tag);
+
+extern struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key);
+
+#else /* BYO_CRYPTO */
+struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv) {
+ (void)allocator;
+ (void)key;
+ (void)iv;
+ abort();
+}
+
+struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv) {
+ (void)allocator;
+ (void)key;
+ (void)iv;
+ abort();
+}
+
+struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv,
+ const struct aws_byte_cursor *aad,
+ const struct aws_byte_cursor *decryption_tag) {
+ (void)allocator;
+ (void)key;
+ (void)iv;
+ (void)aad;
+ (void)decryption_tag;
+ abort();
+}
+
+struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key) {
+ (void)allocator;
+ (void)key;
+ abort();
+}
+
+#endif /* BYO_CRYPTO */
+
+static aws_aes_cbc_256_new_fn *s_aes_cbc_new_fn = aws_aes_cbc_256_new_impl;
+static aws_aes_ctr_256_new_fn *s_aes_ctr_new_fn = aws_aes_ctr_256_new_impl;
+static aws_aes_gcm_256_new_fn *s_aes_gcm_new_fn = aws_aes_gcm_256_new_impl;
+static aws_aes_keywrap_256_new_fn *s_aes_keywrap_new_fn = aws_aes_keywrap_256_new_impl;
+
+static int s_check_input_size_limits(const struct aws_symmetric_cipher *cipher, const struct aws_byte_cursor *input) {
+ /* libcrypto uses int, not size_t, so this is the limit.
+ * For simplicity, enforce the same rules on all platforms. */
+ return input->len <= INT_MAX - cipher->block_size ? AWS_OP_SUCCESS
+ : aws_raise_error(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM);
+}
+
+static int s_validate_key_materials(
+ const struct aws_byte_cursor *key,
+ size_t expected_key_size,
+ const struct aws_byte_cursor *iv,
+ size_t expected_iv_size) {
+ if (key && key->len != expected_key_size) {
+ return aws_raise_error(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM);
+ }
+
+ if (iv && iv->len != expected_iv_size) {
+ return aws_raise_error(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_symmetric_cipher *aws_aes_cbc_256_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv) {
+
+ if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, iv, AWS_AES_256_CIPHER_BLOCK_SIZE) != AWS_OP_SUCCESS) {
+ return NULL;
+ }
+ return s_aes_cbc_new_fn(allocator, key, iv);
+}
+
+struct aws_symmetric_cipher *aws_aes_ctr_256_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv) {
+ if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, iv, AWS_AES_256_CIPHER_BLOCK_SIZE) != AWS_OP_SUCCESS) {
+ return NULL;
+ }
+ return s_aes_ctr_new_fn(allocator, key, iv);
+}
+
+struct aws_symmetric_cipher *aws_aes_gcm_256_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *iv,
+ const struct aws_byte_cursor *aad,
+ const struct aws_byte_cursor *decryption_tag) {
+ if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, iv, AWS_AES_256_CIPHER_BLOCK_SIZE - sizeof(uint32_t)) !=
+ AWS_OP_SUCCESS) {
+ return NULL;
+ }
+ return s_aes_gcm_new_fn(allocator, key, iv, aad, decryption_tag);
+}
+
+struct aws_symmetric_cipher *aws_aes_keywrap_256_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *key) {
+ if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, NULL, 0) != AWS_OP_SUCCESS) {
+ return NULL;
+ }
+ return s_aes_keywrap_new_fn(allocator, key);
+}
+
+void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher) {
+ if (cipher) {
+ cipher->vtable->destroy(cipher);
+ }
+}
+
+int aws_symmetric_cipher_encrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_encrypt,
+ struct aws_byte_buf *out) {
+
+ if (AWS_UNLIKELY(s_check_input_size_limits(cipher, &to_encrypt) != AWS_OP_SUCCESS)) {
+ return AWS_OP_ERR;
+ }
+
+ if (cipher->good) {
+ return cipher->vtable->encrypt(cipher, to_encrypt, out);
+ }
+
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+int aws_symmetric_cipher_decrypt(
+ struct aws_symmetric_cipher *cipher,
+ struct aws_byte_cursor to_decrypt,
+ struct aws_byte_buf *out) {
+
+ if (AWS_UNLIKELY(s_check_input_size_limits(cipher, &to_decrypt) != AWS_OP_SUCCESS)) {
+ return AWS_OP_ERR;
+ }
+
+ if (cipher->good) {
+ return cipher->vtable->decrypt(cipher, to_decrypt, out);
+ }
+
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ if (cipher->good) {
+ int ret_val = cipher->vtable->finalize_encryption(cipher, out);
+ cipher->good = false;
+ return ret_val;
+ }
+
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+ if (cipher->good) {
+ int ret_val = cipher->vtable->finalize_decryption(cipher, out);
+ cipher->good = false;
+ return ret_val;
+ }
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher) {
+ int ret_val = cipher->vtable->reset(cipher);
+ if (ret_val == AWS_OP_SUCCESS) {
+ cipher->good = true;
+ }
+
+ return ret_val;
+}
+
+struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher) {
+ return aws_byte_cursor_from_buf(&cipher->tag);
+}
+
+struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector(const struct aws_symmetric_cipher *cipher) {
+ return aws_byte_cursor_from_buf(&cipher->iv);
+}
+
+struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher) {
+ return aws_byte_cursor_from_buf(&cipher->key);
+}
+
+bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher) {
+ return cipher->good;
+}
+
+void aws_symmetric_cipher_generate_initialization_vector(
+ size_t len_bytes,
+ bool is_counter_mode,
+ struct aws_byte_buf *out) {
+ size_t counter_len = is_counter_mode ? sizeof(uint32_t) : 0;
+ AWS_ASSERT(len_bytes > counter_len);
+ size_t rand_len = len_bytes - counter_len;
+
+ AWS_FATAL_ASSERT(aws_device_random_buffer_append(out, rand_len) == AWS_OP_SUCCESS);
+
+ if (is_counter_mode) {
+ /* put counter at the end, initialized to 1 */
+ aws_byte_buf_write_be32(out, 1);
+ }
+}
+
+void aws_symmetric_cipher_generate_key(size_t key_len_bytes, struct aws_byte_buf *out) {
+ AWS_FATAL_ASSERT(aws_device_random_buffer_append(out, key_len_bytes) == AWS_OP_SUCCESS);
+}
+
+int aws_symmetric_cipher_try_ensure_sufficient_buffer_space(struct aws_byte_buf *buf, size_t size) {
+ if (buf->capacity - buf->len < size) {
+ return aws_byte_buf_reserve_relative(buf, size);
+ }
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c
new file mode 100644
index 0000000000..78ba7a9ee8
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c
@@ -0,0 +1,709 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/private/symmetric_cipher_priv.h>
+
+#include <openssl/evp.h>
+
+struct openssl_aes_cipher {
+    /* Shared cipher state (key, iv, tag, aad, vtable). Each constructor sets
+     * cipher_base.impl to point back at this wrapper. */
+    struct aws_symmetric_cipher cipher_base;
+    EVP_CIPHER_CTX *encryptor_ctx; /* OpenSSL context driving encryption */
+    EVP_CIPHER_CTX *decryptor_ctx; /* OpenSSL context driving decryption */
+    struct aws_byte_buf working_buffer; /* scratch accumulator (used by the keywrap mode below) */
+};
+
+/* Streams `input` through the encryptor context, appending ciphertext to `out`.
+ * Grows `out` as needed; marks the cipher bad and raises on OpenSSL failure. */
+static int s_encrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) {
+
+    /* EVP may hold back up to one block, so worst-case output is input + block_size. */
+    size_t required_buffer_space = input.len + cipher->block_size;
+
+    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
+        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+    }
+
+    size_t available_write_space = out->capacity - out->len;
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    /* NOTE(review): input.len is narrowed to int below; inputs >= INT_MAX would
+     * overflow the cast — confirm callers bound chunk sizes. */
+    int len_written = (int)(available_write_space);
+    if (!EVP_EncryptUpdate(
+            openssl_cipher->encryptor_ctx, out->buffer + out->len, &len_written, input.ptr, (int)input.len)) {
+        cipher->good = false;
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    out->len += len_written;
+    return AWS_OP_SUCCESS;
+}
+
+/* Flushes any buffered/padded bytes from the encryptor into `out`.
+ * After a failure the cipher is unusable until reset. */
+static int s_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    /* EVP_EncryptFinal_ex writes at most one block. */
+    size_t required_buffer_space = cipher->block_size;
+
+    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
+        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+    }
+
+    int len_written = (int)(out->capacity - out->len);
+    if (!EVP_EncryptFinal_ex(openssl_cipher->encryptor_ctx, out->buffer + out->len, &len_written)) {
+        cipher->good = false;
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    out->len += len_written;
+    return AWS_OP_SUCCESS;
+}
+
+/* Streams `input` through the decryptor context, appending plaintext to `out`.
+ * Grows `out` as needed; marks the cipher bad and raises on OpenSSL failure. */
+static int s_decrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    /* EVP may hold back up to one block, so worst-case output is input + block_size. */
+    size_t required_buffer_space = input.len + cipher->block_size;
+
+    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
+        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+    }
+
+    size_t available_write_space = out->capacity - out->len;
+
+    /* NOTE(review): input.len is narrowed to int below — confirm callers bound chunk sizes. */
+    int len_written = (int)available_write_space;
+    if (!EVP_DecryptUpdate(
+            openssl_cipher->decryptor_ctx, out->buffer + out->len, &len_written, input.ptr, (int)input.len)) {
+        cipher->good = false;
+
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    out->len += len_written;
+    return AWS_OP_SUCCESS;
+}
+
+/* Flushes any buffered plaintext from the decryptor into `out` and verifies
+ * padding (and, for GCM, the tag). After a failure the cipher is unusable until reset. */
+static int s_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    /* EVP_DecryptFinal_ex writes at most one block. */
+    size_t required_buffer_space = cipher->block_size;
+
+    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
+        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+    }
+
+    /* Cast the whole remaining-space expression at once, matching
+     * s_finalize_encryption. The previous form `(int)out->capacity - out->len`
+     * cast only `capacity`, so the subtraction was done in size_t and then
+     * implicitly narrowed to int. */
+    int len_written = (int)(out->capacity - out->len);
+    if (!EVP_DecryptFinal_ex(openssl_cipher->decryptor_ctx, out->buffer + out->len, &len_written)) {
+        cipher->good = false;
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    out->len += len_written;
+    return AWS_OP_SUCCESS;
+}
+
+/* Frees both OpenSSL contexts and securely zeroes/releases every buffer
+ * (key, iv, tag, aad, working buffer) before releasing the wrapper itself. */
+static void s_destroy(struct aws_symmetric_cipher *cipher) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    if (openssl_cipher->encryptor_ctx) {
+        EVP_CIPHER_CTX_free(openssl_cipher->encryptor_ctx);
+    }
+
+    if (openssl_cipher->decryptor_ctx) {
+        EVP_CIPHER_CTX_free(openssl_cipher->decryptor_ctx);
+    }
+
+    aws_byte_buf_clean_up_secure(&cipher->key);
+    aws_byte_buf_clean_up_secure(&cipher->iv);
+
+    /* tag/aad are only allocated for GCM; guard on the buffer pointer. */
+    if (cipher->tag.buffer) {
+        aws_byte_buf_clean_up_secure(&cipher->tag);
+    }
+
+    if (cipher->aad.buffer) {
+        aws_byte_buf_clean_up_secure(&cipher->aad);
+    }
+
+    aws_byte_buf_clean_up_secure(&openssl_cipher->working_buffer);
+
+    aws_mem_release(cipher->allocator, openssl_cipher);
+}
+
+/* Wipes per-run state (EVP contexts + working buffer) so the cipher can be
+ * re-initialized by a mode-specific init function; marks the cipher good again.
+ * NOTE(review): EVP_CIPHER_CTX_cleanup is the pre-1.1.0 API — presumably kept
+ * for libcrypto compatibility; confirm before migrating to EVP_CIPHER_CTX_reset. */
+static int s_clear_reusable_state(struct aws_symmetric_cipher *cipher) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    EVP_CIPHER_CTX_cleanup(openssl_cipher->encryptor_ctx);
+    EVP_CIPHER_CTX_cleanup(openssl_cipher->decryptor_ctx);
+    aws_byte_buf_secure_zero(&openssl_cipher->working_buffer);
+    cipher->good = true;
+    return AWS_OP_SUCCESS;
+}
+
+/* Keys both contexts for AES-256-CBC using the stored key and IV.
+ * No EVP_CIPHER_CTX_set_padding call, so OpenSSL's default padding applies. */
+static int s_init_cbc_cipher_materials(struct aws_symmetric_cipher *cipher) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    if (!EVP_EncryptInit_ex(
+            openssl_cipher->encryptor_ctx,
+            EVP_aes_256_cbc(),
+            NULL,
+            openssl_cipher->cipher_base.key.buffer,
+            openssl_cipher->cipher_base.iv.buffer) ||
+        !EVP_DecryptInit_ex(
+            openssl_cipher->decryptor_ctx,
+            EVP_aes_256_cbc(),
+            NULL,
+            openssl_cipher->cipher_base.key.buffer,
+            openssl_cipher->cipher_base.iv.buffer)) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+static int s_reset_cbc_cipher_materials(struct aws_symmetric_cipher *cipher) {
+    /* Wipe reusable state, then re-key the contexts for another CBC run. */
+    int rv = s_clear_reusable_state(cipher);
+    if (rv != AWS_OP_SUCCESS) {
+        return rv;
+    }
+
+    return s_init_cbc_cipher_materials(cipher);
+}
+
+/* Dispatch table for AES-256-CBC; shares the generic encrypt/decrypt paths. */
+static struct aws_symmetric_cipher_vtable s_cbc_vtable = {
+    .alg_name = "AES-CBC 256",
+    .provider = "OpenSSL Compatible LibCrypto",
+    .destroy = s_destroy,
+    .reset = s_reset_cbc_cipher_materials,
+    .decrypt = s_decrypt,
+    .encrypt = s_encrypt,
+    .finalize_decryption = s_finalize_decryption,
+    .finalize_encryption = s_finalize_encryption,
+};
+
+/* Creates an AES-256-CBC cipher. A NULL `key` or `iv` causes a random one of
+ * the correct size to be generated. Returns NULL (with an error raised) if the
+ * OpenSSL contexts cannot be keyed. */
+struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *key,
+    const struct aws_byte_cursor *iv) {
+    struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher));
+
+    cipher->cipher_base.allocator = allocator;
+    cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
+    cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
+    cipher->cipher_base.vtable = &s_cbc_vtable;
+    cipher->cipher_base.impl = cipher;
+
+    /* copy the caller's key, or generate a random 256-bit one */
+    if (key) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key);
+    } else {
+        aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN);
+        aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key);
+    }
+
+    /* copy the caller's IV, or generate a random full-block IV (no counter) */
+    if (iv) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv);
+    } else {
+        aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
+        aws_symmetric_cipher_generate_initialization_vector(
+            AWS_AES_256_CIPHER_BLOCK_SIZE, false, &cipher->cipher_base.iv);
+    }
+
+    /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */
+    cipher->encryptor_ctx = EVP_CIPHER_CTX_new();
+    AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Cipher initialization failed!");
+
+    /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */
+    cipher->decryptor_ctx = EVP_CIPHER_CTX_new();
+    AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Cipher initialization failed!");
+
+    if (s_init_cbc_cipher_materials(&cipher->cipher_base) != AWS_OP_SUCCESS) {
+        goto error;
+    }
+
+    cipher->cipher_base.good = true;
+    return &cipher->cipher_base;
+
+error:
+    s_destroy(&cipher->cipher_base);
+    return NULL;
+}
+
+/* Keys both contexts for AES-256-CTR with padding explicitly disabled
+ * (CTR is a stream mode, so no padding is ever wanted). */
+static int s_init_ctr_cipher_materials(struct aws_symmetric_cipher *cipher) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    if (!(EVP_EncryptInit_ex(
+              openssl_cipher->encryptor_ctx,
+              EVP_aes_256_ctr(),
+              NULL,
+              openssl_cipher->cipher_base.key.buffer,
+              openssl_cipher->cipher_base.iv.buffer) &&
+          EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) ||
+        !(EVP_DecryptInit_ex(
+              openssl_cipher->decryptor_ctx,
+              EVP_aes_256_ctr(),
+              NULL,
+              openssl_cipher->cipher_base.key.buffer,
+              openssl_cipher->cipher_base.iv.buffer) &&
+          EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+static int s_reset_ctr_cipher_materials(struct aws_symmetric_cipher *cipher) {
+    /* Wipe reusable state, then re-key the contexts for another CTR run. */
+    int rv = s_clear_reusable_state(cipher);
+    if (rv != AWS_OP_SUCCESS) {
+        return rv;
+    }
+
+    return s_init_ctr_cipher_materials(cipher);
+}
+
+/* Dispatch table for AES-256-CTR; shares the generic encrypt/decrypt paths. */
+static struct aws_symmetric_cipher_vtable s_ctr_vtable = {
+    .alg_name = "AES-CTR 256",
+    .provider = "OpenSSL Compatible LibCrypto",
+    .destroy = s_destroy,
+    .reset = s_reset_ctr_cipher_materials,
+    .decrypt = s_decrypt,
+    .encrypt = s_encrypt,
+    .finalize_decryption = s_finalize_decryption,
+    .finalize_encryption = s_finalize_encryption,
+};
+
+/* Creates an AES-256-CTR cipher. A NULL `key` or `iv` causes a random one to
+ * be generated; a generated IV reserves its last 4 bytes for the counter.
+ * Returns NULL (with an error raised) if the OpenSSL contexts cannot be keyed. */
+struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *key,
+    const struct aws_byte_cursor *iv) {
+    struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher));
+
+    cipher->cipher_base.allocator = allocator;
+    cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
+    cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
+    cipher->cipher_base.vtable = &s_ctr_vtable;
+    cipher->cipher_base.impl = cipher;
+
+    /* copy the caller's key, or generate a random 256-bit one */
+    if (key) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key);
+    } else {
+        aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN);
+        aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key);
+    }
+
+    /* copy the caller's IV, or generate one with a trailing big-endian counter */
+    if (iv) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv);
+    } else {
+        aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
+        aws_symmetric_cipher_generate_initialization_vector(
+            AWS_AES_256_CIPHER_BLOCK_SIZE, true, &cipher->cipher_base.iv);
+    }
+
+    /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */
+    cipher->encryptor_ctx = EVP_CIPHER_CTX_new();
+    AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Cipher initialization failed!");
+
+    /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */
+    cipher->decryptor_ctx = EVP_CIPHER_CTX_new();
+    AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Cipher initialization failed!");
+
+    if (s_init_ctr_cipher_materials(&cipher->cipher_base) != AWS_OP_SUCCESS) {
+        goto error;
+    }
+
+    cipher->cipher_base.good = true;
+    return &cipher->cipher_base;
+
+error:
+    s_destroy(&cipher->cipher_base);
+    return NULL;
+}
+
+/* GCM finalization: runs the common finalize, then fetches the authentication
+ * tag into cipher->tag — but only if a tag is not already present (a preset
+ * tag means this cipher was constructed for decryption). */
+static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    int ret_val = s_finalize_encryption(cipher, out);
+
+    if (ret_val == AWS_OP_SUCCESS) {
+        if (!cipher->tag.len) {
+            if (!EVP_CIPHER_CTX_ctrl(
+                    openssl_cipher->encryptor_ctx,
+                    EVP_CTRL_GCM_GET_TAG,
+                    (int)cipher->tag.capacity,
+                    cipher->tag.buffer)) {
+                cipher->good = false;
+                return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+            }
+            /* tag buffer was allocated block-size (16) bytes at construction */
+            cipher->tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
+        }
+    }
+
+    return ret_val;
+}
+
+/* Keys both contexts for AES-256-GCM. Uses the two-step init (cipher first,
+ * key/iv second), disables padding, feeds any stored AAD via a NULL-output
+ * Update call, and presets the expected tag on the decryptor when one exists. */
+static int s_init_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    if (!(EVP_EncryptInit_ex(openssl_cipher->encryptor_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL) &&
+          EVP_EncryptInit_ex(
+              openssl_cipher->encryptor_ctx,
+              NULL,
+              NULL,
+              openssl_cipher->cipher_base.key.buffer,
+              openssl_cipher->cipher_base.iv.buffer) &&
+          EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) ||
+        !(EVP_DecryptInit_ex(openssl_cipher->decryptor_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL) &&
+          EVP_DecryptInit_ex(
+              openssl_cipher->decryptor_ctx,
+              NULL,
+              NULL,
+              openssl_cipher->cipher_base.key.buffer,
+              openssl_cipher->cipher_base.iv.buffer) &&
+          EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    /* AAD must be supplied before any plaintext/ciphertext: out == NULL signals AAD. */
+    if (openssl_cipher->cipher_base.aad.len) {
+        int outLen = 0;
+        if (!EVP_EncryptUpdate(
+                openssl_cipher->encryptor_ctx,
+                NULL,
+                &outLen,
+                openssl_cipher->cipher_base.aad.buffer,
+                (int)openssl_cipher->cipher_base.aad.len) ||
+            !EVP_DecryptUpdate(
+                openssl_cipher->decryptor_ctx,
+                NULL,
+                &outLen,
+                openssl_cipher->cipher_base.aad.buffer,
+                (int)openssl_cipher->cipher_base.aad.len)) {
+            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        }
+    }
+
+    /* A non-empty tag means we're decrypting: hand the expected tag to OpenSSL
+     * so EVP_DecryptFinal_ex can authenticate. */
+    if (openssl_cipher->cipher_base.tag.len) {
+        if (!EVP_CIPHER_CTX_ctrl(
+                openssl_cipher->decryptor_ctx,
+                EVP_CTRL_GCM_SET_TAG,
+                (int)openssl_cipher->cipher_base.tag.len,
+                openssl_cipher->cipher_base.tag.buffer)) {
+            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+static int s_reset_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) {
+    /* Wipe reusable state, then re-key the contexts (including AAD/tag setup). */
+    int rv = s_clear_reusable_state(cipher);
+    if (rv != AWS_OP_SUCCESS) {
+        return rv;
+    }
+
+    return s_init_gcm_cipher_materials(cipher);
+}
+
+/* Dispatch table for AES-256-GCM; only finalize_encryption differs from the
+ * generic paths (it also extracts the authentication tag). */
+static struct aws_symmetric_cipher_vtable s_gcm_vtable = {
+    .alg_name = "AES-GCM 256",
+    .provider = "OpenSSL Compatible LibCrypto",
+    .destroy = s_destroy,
+    .reset = s_reset_gcm_cipher_materials,
+    .decrypt = s_decrypt,
+    .encrypt = s_encrypt,
+    .finalize_decryption = s_finalize_decryption,
+    .finalize_encryption = s_finalize_gcm_encryption,
+};
+
+/* Creates an AES-256-GCM cipher. NULL `key`/`iv` are generated randomly
+ * (generated IV is block_size - 4 = 12 bytes). `aad` is optional additional
+ * authenticated data; `decryption_tag` presets the expected tag for decrypt
+ * use, otherwise a block-size tag buffer is allocated for encryption.
+ * Returns NULL (with an error raised) on initialization failure. */
+struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *key,
+    const struct aws_byte_cursor *iv,
+    const struct aws_byte_cursor *aad,
+    const struct aws_byte_cursor *decryption_tag) {
+
+    struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher));
+    cipher->cipher_base.allocator = allocator;
+    cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
+    cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
+    cipher->cipher_base.vtable = &s_gcm_vtable;
+    cipher->cipher_base.impl = cipher;
+
+    /* Copy key into the cipher context. */
+    if (key) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key);
+    } else {
+        aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN);
+        aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key);
+    }
+
+    /* Copy initialization vector into the cipher context. */
+    if (iv) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv);
+    } else {
+        /* 12-byte IV (block size minus 4) */
+        aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE - 4);
+        aws_symmetric_cipher_generate_initialization_vector(
+            AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, &cipher->cipher_base.iv);
+    }
+
+    /* Initialize the cipher contexts. */
+    cipher->encryptor_ctx = EVP_CIPHER_CTX_new();
+    AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Encryptor cipher initialization failed!");
+
+    cipher->decryptor_ctx = EVP_CIPHER_CTX_new();
+    AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Decryptor cipher initialization failed!");
+
+    /* Set AAD if provided */
+    if (aad) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.aad, allocator, *aad);
+    }
+
+    /* Set tag for the decryptor to use.*/
+    if (decryption_tag) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.tag, allocator, *decryption_tag);
+    } else {
+        /* we'll need this later when we grab the tag during encryption time. */
+        aws_byte_buf_init(&cipher->cipher_base.tag, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
+    }
+
+    /* Initialize the cipher contexts with the specified key and IV. */
+    if (s_init_gcm_cipher_materials(&cipher->cipher_base)) {
+        goto error;
+    }
+
+    cipher->cipher_base.good = true;
+    return &cipher->cipher_base;
+
+error:
+    s_destroy(&cipher->cipher_base);
+    return NULL;
+}
+
+/* Keywrap mode only accumulates input; the actual RFC 3394 wrap/unwrap happens
+ * in the finalize functions. `out` is intentionally unused here. */
+static int s_key_wrap_encrypt_decrypt(
+    struct aws_symmetric_cipher *cipher,
+    struct aws_byte_cursor input,
+    struct aws_byte_buf *out) {
+    (void)out;
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    return aws_byte_buf_append_dynamic(&openssl_cipher->working_buffer, &input);
+}
+
+/* Smallest content-encryption key we will wrap: 128 bits. */
+static const size_t MIN_CEK_LENGTH_BYTES = 128 / 8;
+/* RFC 3394 default initial value byte (0xA6, repeated 8 times). */
+static const unsigned char INTEGRITY_VALUE = 0xA6;
+/* RFC 3394 semiblock size in bytes. */
+#define KEYWRAP_BLOCK_SIZE 8u
+
+/* Wraps the accumulated working buffer per RFC 3394 and appends the result
+ * (8-byte integrity value + wrapped semiblocks) to `out`. Requires at least
+ * MIN_CEK_LENGTH_BYTES of input; marks the cipher bad on any failure. */
+static int s_key_wrap_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    if (openssl_cipher->working_buffer.len < MIN_CEK_LENGTH_BYTES) {
+        cipher->good = false;
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    /* the following is an in place implementation of
+       RFC 3394 using the alternate in-place implementation.
+       we use one in-place buffer instead of the copy at the end.
+       the one letter variable names are meant to directly reflect the variables in the RFC */
+    size_t required_buffer_space = openssl_cipher->working_buffer.len + cipher->block_size;
+    size_t starting_len_offset = out->len;
+
+    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
+        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+    }
+
+    /* put the integrity check register in the first 8 bytes of the final buffer. */
+    aws_byte_buf_write_u8_n(out, INTEGRITY_VALUE, KEYWRAP_BLOCK_SIZE);
+    uint8_t *a = out->buffer + starting_len_offset;
+
+    /* copy the plaintext key after the integrity register; it is wrapped in place */
+    struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&openssl_cipher->working_buffer);
+    aws_byte_buf_write_from_whole_cursor(out, working_buf_cur);
+
+    /* put the register buffer after the integrity check register */
+    uint8_t *r = out->buffer + starting_len_offset + KEYWRAP_BLOCK_SIZE;
+
+    /* n = number of 64-bit semiblocks being wrapped */
+    int n = (int)(openssl_cipher->working_buffer.len / KEYWRAP_BLOCK_SIZE);
+
+    uint8_t b_buf[KEYWRAP_BLOCK_SIZE * 2] = {0};
+    struct aws_byte_buf b = aws_byte_buf_from_empty_array(b_buf, sizeof(b_buf));
+    int b_out_len = b.capacity;
+
+    uint8_t temp_buf[KEYWRAP_BLOCK_SIZE * 2] = {0};
+    struct aws_byte_buf temp_input = aws_byte_buf_from_empty_array(temp_buf, sizeof(temp_buf));
+
+    /* RFC 3394 section 2.2.1: 6 rounds over all n semiblocks */
+    for (int j = 0; j <= 5; ++j) {
+        for (int i = 1; i <= n; ++i) {
+            /* concat A and R[i], A should be most significant and then R[i] should be least significant. */
+            memcpy(temp_input.buffer, a, KEYWRAP_BLOCK_SIZE);
+            memcpy(temp_input.buffer + KEYWRAP_BLOCK_SIZE, r, KEYWRAP_BLOCK_SIZE);
+
+            /* encrypt the concatenated A and R[I] and store it in B */
+            if (!EVP_EncryptUpdate(
+                    openssl_cipher->encryptor_ctx, b.buffer, &b_out_len, temp_input.buffer, (int)temp_input.capacity)) {
+                cipher->good = false;
+                return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+            }
+
+            /* t = (n * j) + i per the RFC step counter */
+            unsigned char t = (unsigned char)((n * j) + i);
+            /* put the 64 MSB ^ T into A */
+            memcpy(a, b.buffer, KEYWRAP_BLOCK_SIZE);
+            a[7] ^= t;
+
+            /* put the 64 LSB into R[i] */
+            memcpy(r, b.buffer + KEYWRAP_BLOCK_SIZE, KEYWRAP_BLOCK_SIZE);
+            /* increment i -> R[i] */
+            r += KEYWRAP_BLOCK_SIZE;
+        }
+        /* reset R */
+        r = out->buffer + starting_len_offset + KEYWRAP_BLOCK_SIZE;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Unwraps the accumulated working buffer per RFC 3394 (unwrap runs the rounds
+ * in reverse), appending the recovered key to `out`. Fails with
+ * AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED when the integrity register does
+ * not decode to eight INTEGRITY_VALUE bytes. */
+static int s_key_wrap_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    /* wrapped data = key + 8-byte integrity register, so enforce the larger minimum */
+    if (openssl_cipher->working_buffer.len < MIN_CEK_LENGTH_BYTES + KEYWRAP_BLOCK_SIZE) {
+        cipher->good = false;
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    /* the following is an in place implementation of
+       RFC 3394 using the alternate in-place implementation.
+       we use one in-place buffer instead of the copy at the end.
+       the one letter variable names are meant to directly reflect the variables in the RFC */
+    size_t required_buffer_space = openssl_cipher->working_buffer.len - KEYWRAP_BLOCK_SIZE;
+    size_t starting_len_offset = out->len;
+
+    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
+        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+    }
+
+    /* copy the wrapped semiblocks (everything after the integrity register) into out */
+    memcpy(
+        out->buffer + starting_len_offset,
+        openssl_cipher->working_buffer.buffer + KEYWRAP_BLOCK_SIZE,
+        required_buffer_space);
+
+    /* integrity register should be the first 8 bytes of the final buffer. */
+    uint8_t *a = openssl_cipher->working_buffer.buffer;
+
+    /* in-place register is the plaintext. For decryption, start at the last array position (8 bytes before the end); */
+    uint8_t *r = out->buffer + starting_len_offset + required_buffer_space - KEYWRAP_BLOCK_SIZE;
+
+    int n = (int)(required_buffer_space / KEYWRAP_BLOCK_SIZE);
+
+    /* NOTE(review): this scratch buffer is 80 bytes but only the first 16 are
+     * ever read/written (the encrypt path uses KEYWRAP_BLOCK_SIZE * 2) — confirm. */
+    uint8_t b_buf[KEYWRAP_BLOCK_SIZE * 10] = {0};
+    struct aws_byte_buf b = aws_byte_buf_from_empty_array(b_buf, sizeof(b_buf));
+    int b_out_len = b.capacity;
+
+    uint8_t temp_buf[KEYWRAP_BLOCK_SIZE * 2] = {0};
+    struct aws_byte_buf temp_input = aws_byte_buf_from_empty_array(temp_buf, sizeof(temp_buf));
+
+    /* RFC 3394 section 2.2.2: run the 6 rounds and semiblock indices in reverse */
+    for (int j = 5; j >= 0; --j) {
+        for (int i = n; i >= 1; --i) {
+            /* concat A and T */
+            memcpy(temp_input.buffer, a, KEYWRAP_BLOCK_SIZE);
+            unsigned char t = (unsigned char)((n * j) + i);
+            temp_input.buffer[7] ^= t;
+            /* R[i] */
+            memcpy(temp_input.buffer + KEYWRAP_BLOCK_SIZE, r, KEYWRAP_BLOCK_SIZE);
+
+            /* Decrypt the concatenated buffer */
+            if (!EVP_DecryptUpdate(
+                    openssl_cipher->decryptor_ctx, b.buffer, &b_out_len, temp_input.buffer, (int)temp_input.capacity)) {
+                cipher->good = false;
+                return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+            }
+
+            /* set A to 64 MSB of decrypted result */
+            memcpy(a, b.buffer, KEYWRAP_BLOCK_SIZE);
+            /* set the R[i] to the 64 LSB of decrypted result */
+            memcpy(r, b.buffer + KEYWRAP_BLOCK_SIZE, KEYWRAP_BLOCK_SIZE);
+            /* decrement i -> R[i] */
+            r -= KEYWRAP_BLOCK_SIZE;
+        }
+        /* reset R */
+        r = out->buffer + starting_len_offset + required_buffer_space - KEYWRAP_BLOCK_SIZE;
+    }
+
+    /* here we perform the integrity check to make sure A == 0xA6A6A6A6A6A6A6A6 */
+    for (size_t i = 0; i < KEYWRAP_BLOCK_SIZE; ++i) {
+        if (a[i] != INTEGRITY_VALUE) {
+            cipher->good = false;
+            return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED);
+        }
+    }
+
+    out->len += required_buffer_space;
+    return AWS_OP_SUCCESS;
+}
+
+/* Keys both contexts with AES-256-ECB (no IV) and disables padding — the
+ * RFC 3394 rounds feed exact 16-byte blocks through the raw block cipher. */
+static int s_init_keywrap_cipher_materials(struct aws_symmetric_cipher *cipher) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    if (!(EVP_EncryptInit_ex(openssl_cipher->encryptor_ctx, EVP_aes_256_ecb(), NULL, cipher->key.buffer, NULL) &&
+          EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) ||
+        !(EVP_DecryptInit_ex(openssl_cipher->decryptor_ctx, EVP_aes_256_ecb(), NULL, cipher->key.buffer, NULL) &&
+          EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) {
+        cipher->good = false;
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+static int s_reset_keywrap_cipher_materials(struct aws_symmetric_cipher *cipher) {
+    /* Wipe reusable state (including the accumulation buffer), then re-key. */
+    int rv = s_clear_reusable_state(cipher);
+    if (rv != AWS_OP_SUCCESS) {
+        return rv;
+    }
+
+    return s_init_keywrap_cipher_materials(cipher);
+}
+
+/* Dispatch table for AES-256 key wrap; encrypt/decrypt only buffer input,
+ * the finalize functions perform the actual RFC 3394 wrap/unwrap. */
+static struct aws_symmetric_cipher_vtable s_keywrap_vtable = {
+    .alg_name = "AES-KEYWRAP 256",
+    .provider = "OpenSSL Compatible LibCrypto",
+    .destroy = s_destroy,
+    .reset = s_reset_keywrap_cipher_materials,
+    .decrypt = s_key_wrap_encrypt_decrypt,
+    .encrypt = s_key_wrap_encrypt_decrypt,
+    .finalize_decryption = s_key_wrap_finalize_decryption,
+    .finalize_encryption = s_key_wrap_finalize_encryption,
+};
+
+/* Creates an AES-256 key-wrap (RFC 3394) cipher. A NULL `key` generates a
+ * random 256-bit one. No IV parameter: key wrap uses the fixed integrity
+ * value instead. Returns NULL (with an error raised) on init failure. */
+struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *key) {
+    struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher));
+    cipher->cipher_base.allocator = allocator;
+    /* block size here is the 8-byte RFC 3394 semiblock, not the AES block */
+    cipher->cipher_base.block_size = KEYWRAP_BLOCK_SIZE;
+    cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
+    cipher->cipher_base.vtable = &s_keywrap_vtable;
+    cipher->cipher_base.impl = cipher;
+
+    /* Copy key into the cipher context. */
+    if (key) {
+        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key);
+    } else {
+        aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN);
+        aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key);
+    }
+
+    /* accumulation buffer for input fed via encrypt/decrypt; grows dynamically */
+    aws_byte_buf_init(&cipher->working_buffer, allocator, KEYWRAP_BLOCK_SIZE);
+
+    /* Initialize the cipher contexts. */
+    cipher->encryptor_ctx = EVP_CIPHER_CTX_new();
+    AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Encryptor cipher initialization failed!");
+
+    cipher->decryptor_ctx = EVP_CIPHER_CTX_new();
+    AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Decryptor cipher initialization failed!");
+
+    /* Initialize the cipher contexts with the specified key and IV. */
+    if (s_init_keywrap_cipher_materials(&cipher->cipher_base)) {
+        goto error;
+    }
+
+    cipher->cipher_base.good = true;
+    return &cipher->cipher_base;
+
+error:
+    s_destroy(&cipher->cipher_base);
+    return NULL;
+}