author    | thegeorg <[email protected]> | 2025-05-12 15:51:24 +0300
committer | thegeorg <[email protected]> | 2025-05-12 16:06:27 +0300
commit    | d629bb70c8773d2c0c43f5088ddbb5a86d8c37ea (patch)
tree      | 4f678e0d65ad08c800db21c657d3b0f71fafed06 /contrib/restricted/aws/aws-c-cal/source
parent    | 92c4b696d7a1c03d54e13aff7a7c20a078d90dd7 (diff)
Update contrib/restricted/aws libraries to nixpkgs 24.05
commit_hash:f8083acb039e6005e820cdee77b84e0a6b6c6d6d
Diffstat (limited to 'contrib/restricted/aws/aws-c-cal/source')
17 files changed, 1985 insertions, 110 deletions
diff --git a/contrib/restricted/aws/aws-c-cal/source/cal.c b/contrib/restricted/aws/aws-c-cal/source/cal.c index 13477c8dd3b..5b0f8ba1a69 100644 --- a/contrib/restricted/aws/aws-c-cal/source/cal.c +++ b/contrib/restricted/aws/aws-c-cal/source/cal.c @@ -19,7 +19,7 @@ static struct aws_error_info s_errors[] = { "sign a message with a public key."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, - "A key length was used for an algorithm that needs a different key length"), + "A key length was used for an algorithm that needs a different key length."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER, "An ASN.1 OID was encountered that wasn't expected or understood. Most likely, an unsupported algorithm was " @@ -29,10 +29,10 @@ static struct aws_error_info s_errors[] = { "An ASN.1 DER decoding operation failed on malformed input."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_MISMATCHED_DER_TYPE, - "An invalid DER type was requested during encoding/decoding"), + "An invalid DER type was requested during encoding/decoding."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM, - "The specified algorithim is unsupported on this platform."), + "The specified algorithm is unsupported on this platform."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, "The input passed to a cipher algorithm was too large for that algorithm. Consider breaking the input into " @@ -40,7 +40,13 @@ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, "A cipher material such as an initialization vector or tag was an incorrect size for the selected algorithm."), -}; + AWS_DEFINE_ERROR_INFO_CAL( + AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT, + "DER decoder does support negative integers."), + AWS_DEFINE_ERROR_INFO_CAL(AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT, "Key format is not supported."), + AWS_DEFINE_ERROR_INFO_CAL( + AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED, + "Unknown error when calling underlying Crypto library.")}; static struct aws_error_info_list s_list = { .error_list = s_errors, @@ -60,6 +66,7 @@ static struct aws_log_subject_info s_cal_log_subject_infos[] = { AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto_resolve", "Subject for libcrypto symbol resolution logging."), + DEFINE_LOG_SUBJECT_INFO(AWS_LS_CAL_RSA, "rsa", "Subject for rsa cryptography specific logging."), }; static struct aws_log_subject_info_list s_cal_log_subject_list = { @@ -70,6 +77,7 @@ static struct aws_log_subject_info_list s_cal_log_subject_list = { #ifndef BYO_CRYPTO extern void aws_cal_platform_init(struct aws_allocator *allocator); extern void aws_cal_platform_clean_up(void); +extern void aws_cal_platform_thread_clean_up(void); #endif /* BYO_CRYPTO */ static bool s_cal_library_initialized = false; @@ -96,3 +104,9 @@ void aws_cal_library_clean_up(void) { aws_common_library_clean_up(); } } + +void aws_cal_thread_clean_up(void) { +#ifndef BYO_CRYPTO + aws_cal_platform_thread_clean_up(); +#endif /* BYO_CRYPTO */ +} diff --git a/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_aes.c b/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_aes.c index 8656d54d071..64dbfd43bc1 100644 --- a/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_aes.c +++ b/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_aes.c @@ -9,11 +9,18 @@ #include <CommonCrypto/CommonHMAC.h> #include <CommonCrypto/CommonSymmetricKeywrap.h> -#include "common_cryptor_spi.h" - -#if 
(defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 /* macOS 10.13 */)) || \ - (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && (__IPHONE_OS_VERSION_MAX_ALLOWED >= 110000 /* iOS v11 */)) -# define USE_LATEST_CRYPTO_API 1 +#if !defined(AWS_APPSTORE_SAFE) +/* CommonCrypto does not offer public APIs for doing AES GCM. + * There are private APIs for doing it (CommonCryptoSPI.h), but App Store + * submissions that reference these private symbols will be rejected. */ + +# define SUPPORT_AES_GCM_VIA_SPI 1 +# include "common_cryptor_spi.h" + +# if (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 /* macOS 10.13 */)) || \ + (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && (__IPHONE_OS_VERSION_MAX_ALLOWED >= 110000 /* iOS v11 */)) +# define USE_LATEST_CRYPTO_API 1 +# endif #endif struct cc_aes_cipher { @@ -353,43 +360,45 @@ struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( return &cc_cipher->cipher_base; } +#ifdef SUPPORT_AES_GCM_VIA_SPI + /* * Note that CCCryptorGCMFinal is deprecated in Mac 10.13. It also doesn't compare the tag with expected tag * https://opensource.apple.com/source/CommonCrypto/CommonCrypto-60118.1.1/include/CommonCryptorSPI.h.auto.html */ static CCStatus s_cc_crypto_gcm_finalize(struct _CCCryptor *encryptor_handle, uint8_t *buffer, size_t tag_length) { -#ifdef USE_LATEST_CRYPTO_API +# ifdef USE_LATEST_CRYPTO_API if (__builtin_available(macOS 10.13, iOS 11.0, *)) { return CCCryptorGCMFinalize(encryptor_handle, buffer, tag_length); } else { /* We would never hit this branch for newer macOS and iOS versions because of the __builtin_available check, so we can * suppress the compiler warning. */ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" return CCCryptorGCMFinal(encryptor_handle, buffer, &tag_length); -# pragma clang diagnostic pop +# pragma clang diagnostic pop } -#else +# else return CCCryptorGCMFinal(encryptor_handle, buffer, &tag_length); -#endif +# endif } static CCCryptorStatus s_cc_cryptor_gcm_set_iv(struct _CCCryptor *encryptor_handle, uint8_t *buffer, size_t length) { -#ifdef USE_LATEST_CRYPTO_API +# ifdef USE_LATEST_CRYPTO_API if (__builtin_available(macOS 10.13, iOS 11.0, *)) { return CCCryptorGCMSetIV(encryptor_handle, buffer, length); } else { /* We would never hit this branch for newer macOS and iOS versions because of the __builtin_available check, so we can * suppress the compiler warning. 
*/ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" return CCCryptorGCMAddIV(encryptor_handle, buffer, length); -# pragma clang diagnostic pop +# pragma clang diagnostic pop } -#else +# else return CCCryptorGCMAddIV(encryptor_handle, buffer, length); -#endif +# endif } static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { @@ -581,6 +590,26 @@ struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( return &cc_cipher->cipher_base; } +#else /* !SUPPORT_AES_GCM_VIA_SPI */ + +struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *tag) { + + (void)allocator; + (void)key; + (void)iv; + (void)aad; + (void)tag; + aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + return NULL; +} + +#endif /* SUPPORT_AES_GCM_VIA_SPI */ + static int s_keywrap_encrypt_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, diff --git a/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_platform_init.c b/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_platform_init.c index decedcdafa2..f2da2805673 100644 --- a/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_platform_init.c +++ b/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_platform_init.c @@ -10,3 +10,5 @@ void aws_cal_platform_init(struct aws_allocator *allocator) { } void aws_cal_platform_clean_up(void) {} + +void aws_cal_platform_thread_clean_up(void) {} diff --git a/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_ecc.c b/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_ecc.c index de313f08f2a..646484449f1 100644 --- a/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_ecc.c +++ b/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_ecc.c @@ -7,13 +7,10 @@ #include <aws/cal/cal.h> #include <aws/cal/private/der.h> +#include <CoreFoundation/CoreFoundation.h> #include <Security/SecKey.h> #include <Security/Security.h> -#if !defined(AWS_OS_IOS) -# include <Security/SecSignVerifyTransform.h> -#endif - struct commoncrypto_ecc_key_pair { struct aws_ecc_key_pair key_pair; SecKeyRef priv_key_ref; @@ -25,6 +22,29 @@ static uint8_t s_preamble = 0x04; static size_t s_der_overhead = 8; +/* The hard-coded "valid" public keys. Copy/pated from one of our unit test. 
*/ +const static uint8_t s_fake_x_ecdsa_p256[] = { + 0xd0, 0x72, 0x0d, 0xc6, 0x91, 0xaa, 0x80, 0x09, 0x6b, 0xa3, 0x2f, 0xed, 0x1c, 0xb9, 0x7c, 0x2b, + 0x62, 0x06, 0x90, 0xd0, 0x6d, 0xe0, 0x31, 0x7b, 0x86, 0x18, 0xd5, 0xce, 0x65, 0xeb, 0x72, 0x8f, +}; + +const static uint8_t s_fake_y_ecdsa_p256[] = { + 0x96, 0x81, 0xb5, 0x17, 0xb1, 0xcd, 0xa1, 0x7d, 0x0d, 0x83, 0xd3, 0x35, 0xd9, 0xc4, 0xa8, 0xa9, + 0xa9, 0xb0, 0xb1, 0xb3, 0xc7, 0x10, 0x6d, 0x8f, 0x3c, 0x72, 0xbc, 0x50, 0x93, 0xdc, 0x27, 0x5f, +}; + +const static uint8_t s_fake_x_ecdsa_p384[] = { + 0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67, + 0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64, + 0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24, +}; + +const static uint8_t s_fake_y_ecdsa_p384[] = { + 0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69, + 0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19, + 0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0, +}; + static int s_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, @@ -173,15 +193,13 @@ static struct commoncrypto_ecc_key_pair *s_alloc_pair_and_init_buffers( goto error; } - memset(cc_key_pair->key_pair.key_buf.buffer, 0, cc_key_pair->key_pair.key_buf.len); - aws_byte_buf_write_u8(&cc_key_pair->key_pair.key_buf, s_preamble); if (pub_x.ptr && pub_y.ptr) { aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_x); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_y); } else { - cc_key_pair->key_pair.key_buf.len += s_key_coordinate_size * 2; + aws_byte_buf_write_u8_n(&cc_key_pair->key_pair.key_buf, 0x0, s_key_coordinate_size * 2); } if (priv_key.ptr) { @@ -213,10 +231,40 @@ struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key) { - struct aws_byte_cursor empty_cur; - AWS_ZERO_STRUCT(empty_cur); + /** + * We use SecCreateKeyWithData to create ECC key. Expected format for the key passed to that api is a byte buffer + * consisting of "0x04 | x | y | p", where x,y is public pair and p is private key. + * + * In this case we only have private key and we need to construct SecKey from that. + * + * We used to just pass 0,0 point for x,y, i.e. "0x04 | 0 | 0 | p". + * + * This used to work on Macs before 14, but in 14+ SecCreateKeyWithData returns error, + * which is reasonable since 0,0 is not a valid public point. + * + * To get around the issue, we use a fake public key, which is a valid public point, but not matching the private + * key as a quick workaround. 
+ */ + struct aws_byte_cursor fake_pub_x; + AWS_ZERO_STRUCT(fake_pub_x); + struct aws_byte_cursor fake_pub_y; + AWS_ZERO_STRUCT(fake_pub_y); + switch (curve_name) { + case AWS_CAL_ECDSA_P256: + fake_pub_x = aws_byte_cursor_from_array(s_fake_x_ecdsa_p256, AWS_ARRAY_SIZE(s_fake_x_ecdsa_p256)); + fake_pub_y = aws_byte_cursor_from_array(s_fake_y_ecdsa_p256, AWS_ARRAY_SIZE(s_fake_y_ecdsa_p256)); + break; + case AWS_CAL_ECDSA_P384: + fake_pub_x = aws_byte_cursor_from_array(s_fake_x_ecdsa_p384, AWS_ARRAY_SIZE(s_fake_x_ecdsa_p384)); + fake_pub_y = aws_byte_cursor_from_array(s_fake_y_ecdsa_p384, AWS_ARRAY_SIZE(s_fake_y_ecdsa_p384)); + break; + default: + aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + return NULL; + } + struct commoncrypto_ecc_key_pair *cc_key_pair = - s_alloc_pair_and_init_buffers(allocator, curve_name, empty_cur, empty_cur, *priv_key); + s_alloc_pair_and_init_buffers(allocator, curve_name, fake_pub_x, fake_pub_y, *priv_key); if (!cc_key_pair) { return NULL; @@ -254,6 +302,10 @@ struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( goto error; } + /* Zero out the fake public keys in the key pair */ + aws_byte_buf_secure_zero(&cc_key_pair->key_pair.pub_x); + aws_byte_buf_secure_zero(&cc_key_pair->key_pair.pub_y); + CFRelease(key_attributes); CFRelease(private_key_data); @@ -336,7 +388,7 @@ error: return NULL; } -#if !defined(AWS_OS_IOS) +#if defined(AWS_OS_MACOS) struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name) { @@ -443,7 +495,6 @@ struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( goto error; } - memset(cc_key_pair->key_pair.key_buf.buffer, 0, cc_key_pair->key_pair.key_buf.len); aws_byte_buf_write_u8(&cc_key_pair->key_pair.key_buf, s_preamble); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_x); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_y); @@ -487,7 +538,7 @@ error: s_destroy_key(&cc_key_pair->key_pair); return NULL; } -#endif /* AWS_OS_IOS */ +#endif /* AWS_OS_MACOS */ struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, diff --git a/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_rsa.c b/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_rsa.c new file mode 100644 index 00000000000..c9c02ec981c --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_rsa.c @@ -0,0 +1,491 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include <aws/cal/private/rsa.h> + +#include <aws/cal/cal.h> +#include <aws/common/encoding.h> + +#include <Security/SecKey.h> +#include <Security/Security.h> + +struct sec_rsa_key_pair { + struct aws_rsa_key_pair base; + CFAllocatorRef cf_allocator; + SecKeyRef priv_key_ref; + SecKeyRef pub_key_ref; +}; + +static void s_rsa_destroy_key(void *key_pair) { + if (key_pair == NULL) { + return; + } + + struct aws_rsa_key_pair *base = key_pair; + struct sec_rsa_key_pair *impl = base->impl; + + if (impl->pub_key_ref) { + CFRelease(impl->pub_key_ref); + } + + if (impl->priv_key_ref) { + CFRelease(impl->priv_key_ref); + } + + if (impl->cf_allocator) { + aws_wrapped_cf_allocator_destroy(impl->cf_allocator); + } + + aws_rsa_key_pair_base_clean_up(base); + + aws_mem_release(base->allocator, impl); +} + +/* + * Transforms security error code into crt error code and raises it as necessary. 
+ * Docs on what security apis can throw are fairly sparse and so far in testing + * it only threw generic -50 error. So just log for now and we can add additional + * error translation later. + */ +static int s_reinterpret_sec_error_as_crt(CFErrorRef error, const char *function_name) { + if (error == NULL) { + return AWS_OP_SUCCESS; + } + + CFIndex error_code = CFErrorGetCode(error); + CFStringRef error_message = CFErrorCopyDescription(error); /* This function never returns NULL */ + + /* + * Note: CFStringGetCStringPtr returns NULL quite often. + * Refer to writeup at the start of CFString.h as to why. + * To reliably get an error message we need to use the following function + * that will copy error string into our buffer. + */ + const char *error_cstr = NULL; + char buffer[128]; + if (CFStringGetCString(error_message, buffer, 128, kCFStringEncodingUTF8)) { + error_cstr = buffer; + } + + int crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED; + + /* + * Mac seems throws errSecVerifyFailed for any signature verification + * failures (based on testing and not review of their code). + * Which makes it impossible to distinguish between signature validation + * failure and api call failure. + * So let errSecVerifyFailed as signature validation failure, rather than a + * more generic Crypto Failure as it seems more intuitive to caller that + * signature cannot be verified, rather than something wrong with crypto (and + * in most cases crypto is working correctly, but returning non-specific error). + */ + if (error_code == errSecVerifyFailed) { + crt_error = AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED; + } + + AWS_LOGF_ERROR( + AWS_LS_CAL_RSA, + "%s() failed. CFError:%ld(%s) aws_error:%s", + function_name, + error_code, + error_cstr ? error_cstr : "", + aws_error_name(crt_error)); + + CFRelease(error_message); + + return aws_raise_error(crt_error); +} + +/* + * Maps crt encryption algo enum to Security Framework equivalent. + * Fails with AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM if mapping cannot be done for + * some reason. + * Mapped value is passed back through out variable. + */ +static int s_map_rsa_encryption_algo_to_sec(enum aws_rsa_encryption_algorithm algorithm, SecKeyAlgorithm *out) { + + switch (algorithm) { + case AWS_CAL_RSA_ENCRYPTION_PKCS1_5: + *out = kSecKeyAlgorithmRSAEncryptionPKCS1; + return AWS_OP_SUCCESS; + case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256: + *out = kSecKeyAlgorithmRSAEncryptionOAEPSHA256; + return AWS_OP_SUCCESS; + case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512: + *out = kSecKeyAlgorithmRSAEncryptionOAEPSHA512; + return AWS_OP_SUCCESS; + } + + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); +} + +/* + * Maps crt encryption algo enum to Security Framework equivalent. + * Fails with AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM if mapping cannot be done for + * some reason. + * Mapped value is passed back through out variable. 
+ */ +static int s_map_rsa_signing_algo_to_sec(enum aws_rsa_signature_algorithm algorithm, SecKeyAlgorithm *out) { + + switch (algorithm) { + case AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256: + *out = kSecKeyAlgorithmRSASignatureDigestPKCS1v15SHA256; + return AWS_OP_SUCCESS; + case AWS_CAL_RSA_SIGNATURE_PSS_SHA256: +#if (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 /* macOS 10.13 */)) || \ + (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && (__IPHONE_OS_VERSION_MAX_ALLOWED >= 110000 /* iOS v11 */)) || \ + (defined(__TV_OS_VERSION_MAX_ALLOWED) && (__TV_OS_VERSION_MAX_ALLOWED >= 110000 /* tvos v11 */)) || \ + (defined(__WATCH_OS_VERSION_MAX_ALLOWED) && (__WATCH_OS_VERSION_MAX_ALLOWED >= 40000 /* watchos v4 */)) + if (__builtin_available(macos 10.13, ios 11.0, tvos 11.0, watchos 4.0, *)) { + *out = kSecKeyAlgorithmRSASignatureDigestPSSSHA256; + return AWS_OP_SUCCESS; + } else { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } +#else + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); +#endif + } + + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); +} + +static int s_rsa_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out) { + struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (key_pair_impl->pub_key_ref == NULL) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Public Key required for encrypt operation."); + return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); + } + + SecKeyAlgorithm alg; + if (s_map_rsa_encryption_algo_to_sec(algorithm, &alg)) { + return AWS_OP_ERR; + } + + if (!SecKeyIsAlgorithmSupported(key_pair_impl->pub_key_ref, kSecKeyOperationTypeEncrypt, alg)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + CFDataRef plaintext_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, plaintext.ptr, plaintext.len, kCFAllocatorNull); + AWS_FATAL_ASSERT(plaintext_ref); + + CFErrorRef error = NULL; + CFDataRef ciphertext_ref = SecKeyCreateEncryptedData(key_pair_impl->pub_key_ref, alg, plaintext_ref, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateEncryptedData")) { + CFRelease(error); + goto on_error; + } + + struct aws_byte_cursor ciphertext_cur = + aws_byte_cursor_from_array(CFDataGetBytePtr(ciphertext_ref), CFDataGetLength(ciphertext_ref)); + + if (aws_byte_buf_append(out, &ciphertext_cur)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + CFRelease(plaintext_ref); + CFRelease(ciphertext_ref); + return AWS_OP_SUCCESS; + +on_error: + if (plaintext_ref != NULL) { + CFRelease(plaintext_ref); + } + + if (ciphertext_ref != NULL) { + CFRelease(ciphertext_ref); + } + + return AWS_OP_ERR; +} + +static int s_rsa_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out) { + struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (key_pair_impl->priv_key_ref == NULL) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Private Key required for encrypt operation."); + return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); + } + + SecKeyAlgorithm alg; + if (s_map_rsa_encryption_algo_to_sec(algorithm, &alg)) { + return AWS_OP_ERR; + } + + if (!SecKeyIsAlgorithmSupported(key_pair_impl->priv_key_ref, 
kSecKeyOperationTypeDecrypt, alg)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + CFDataRef ciphertext_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, ciphertext.ptr, ciphertext.len, kCFAllocatorNull); + AWS_FATAL_ASSERT(ciphertext_ref); + + CFErrorRef error = NULL; + CFDataRef plaintext_ref = SecKeyCreateDecryptedData(key_pair_impl->priv_key_ref, alg, ciphertext_ref, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateDecryptedData")) { + CFRelease(error); + goto on_error; + } + + struct aws_byte_cursor plaintext_cur = + aws_byte_cursor_from_array(CFDataGetBytePtr(plaintext_ref), CFDataGetLength(plaintext_ref)); + + if (aws_byte_buf_append(out, &plaintext_cur)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + CFRelease(plaintext_ref); + CFRelease(ciphertext_ref); + return AWS_OP_SUCCESS; + +on_error: + if (plaintext_ref != NULL) { + CFRelease(plaintext_ref); + } + + if (ciphertext_ref != NULL) { + CFRelease(ciphertext_ref); + } + + return AWS_OP_ERR; +} + +static int s_rsa_sign( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out) { + struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (key_pair_impl->priv_key_ref == NULL) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Private Key required for sign operation."); + return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); + } + + SecKeyAlgorithm alg; + if (s_map_rsa_signing_algo_to_sec(algorithm, &alg)) { + return AWS_OP_ERR; + } + + if (!SecKeyIsAlgorithmSupported(key_pair_impl->priv_key_ref, kSecKeyOperationTypeSign, alg)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + CFDataRef digest_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, digest.ptr, digest.len, kCFAllocatorNull); + AWS_FATAL_ASSERT(digest_ref); + + CFErrorRef error = NULL; + CFDataRef signature_ref = SecKeyCreateSignature(key_pair_impl->priv_key_ref, alg, digest_ref, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateSignature")) { + CFRelease(error); + goto on_error; + } + + struct aws_byte_cursor signature_cur = + aws_byte_cursor_from_array(CFDataGetBytePtr(signature_ref), CFDataGetLength(signature_ref)); + + if (aws_byte_buf_append(out, &signature_cur)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + CFRelease(digest_ref); + CFRelease(signature_ref); + + return AWS_OP_SUCCESS; + +on_error: + CFRelease(digest_ref); + + if (signature_ref != NULL) { + CFRelease(signature_ref); + } + + return AWS_OP_ERR; +} + +static int s_rsa_verify( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature) { + struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (key_pair_impl->pub_key_ref == NULL) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Public Key required for verify operation."); + return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); + } + + SecKeyAlgorithm alg; + if (s_map_rsa_signing_algo_to_sec(algorithm, &alg)) { + return AWS_OP_ERR; + } + + if (!SecKeyIsAlgorithmSupported(key_pair_impl->pub_key_ref, kSecKeyOperationTypeVerify, alg)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this 
operation"); + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + CFDataRef digest_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, digest.ptr, digest.len, kCFAllocatorNull); + CFDataRef signature_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, signature.ptr, signature.len, kCFAllocatorNull); + AWS_FATAL_ASSERT(digest_ref && signature_ref); + + CFErrorRef error = NULL; + Boolean result = SecKeyVerifySignature(key_pair_impl->pub_key_ref, alg, digest_ref, signature_ref, &error); + + CFRelease(digest_ref); + CFRelease(signature_ref); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyVerifySignature")) { + CFRelease(error); + return AWS_OP_ERR; + } + + return result ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); +} + +static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { + .encrypt = s_rsa_encrypt, + .decrypt = s_rsa_decrypt, + .sign = s_rsa_sign, + .verify = s_rsa_verify, +}; + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + struct sec_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct sec_rsa_key_pair)); + + CFMutableDictionaryRef key_attributes = NULL; + CFDataRef private_key_data = NULL; + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + key_pair_impl->cf_allocator = aws_wrapped_cf_allocator_new(allocator); + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); + + private_key_data = CFDataCreate(key_pair_impl->cf_allocator, key.ptr, key.len); + AWS_FATAL_ASSERT(private_key_data); + + key_attributes = CFDictionaryCreateMutable(key_pair_impl->cf_allocator, 0, NULL, NULL); + AWS_FATAL_ASSERT(key_attributes); + + CFDictionaryAddValue(key_attributes, kSecClass, kSecClassKey); + CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeRSA); + CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPrivate); + + CFErrorRef error = NULL; + key_pair_impl->priv_key_ref = SecKeyCreateWithData(private_key_data, key_attributes, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateWithData")) { + CFRelease(error); + goto on_error; + } + + key_pair_impl->pub_key_ref = SecKeyCopyPublicKey(key_pair_impl->priv_key_ref); + AWS_FATAL_ASSERT(key_pair_impl->pub_key_ref); + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + size_t block_size = SecKeyGetBlockSize(key_pair_impl->priv_key_ref); + + if (block_size < (AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS / 8) || + block_size > (AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS / 8)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unsupported key size: %zu", block_size); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + goto on_error; + } + + key_pair_impl->base.key_size_in_bits = block_size * 8; + + CFRelease(key_attributes); + CFRelease(private_key_data); + + return &key_pair_impl->base; + +on_error: + if (private_key_data) { + CFRelease(private_key_data); + } + + if (key_attributes) { + CFRelease(key_attributes); + } + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + struct sec_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct sec_rsa_key_pair)); + + CFMutableDictionaryRef key_attributes = 
NULL; + CFDataRef public_key_data = NULL; + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + key_pair_impl->cf_allocator = aws_wrapped_cf_allocator_new(allocator); + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); + + public_key_data = CFDataCreate(key_pair_impl->cf_allocator, key.ptr, key.len); + AWS_FATAL_ASSERT(public_key_data); + + key_attributes = CFDictionaryCreateMutable(key_pair_impl->cf_allocator, 0, NULL, NULL); + AWS_FATAL_ASSERT(key_attributes); + + CFDictionaryAddValue(key_attributes, kSecClass, kSecClassKey); + CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeRSA); + CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPublic); + + CFErrorRef error = NULL; + key_pair_impl->pub_key_ref = SecKeyCreateWithData(public_key_data, key_attributes, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateWithData")) { + CFRelease(error); + goto on_error; + } + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + size_t block_size = SecKeyGetBlockSize(key_pair_impl->pub_key_ref); + if (block_size < (AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS / 8) || + block_size > (AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS / 8)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unsupported key size: %zu", block_size); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + goto on_error; + } + key_pair_impl->base.key_size_in_bits = block_size * 8; + + CFRelease(key_attributes); + CFRelease(public_key_data); + + return &key_pair_impl->base; + +on_error: + if (public_key_data) { + CFRelease(public_key_data); + } + + if (key_attributes) { + CFRelease(key_attributes); + } + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} diff --git a/contrib/restricted/aws/aws-c-cal/source/der.c b/contrib/restricted/aws/aws-c-cal/source/der.c index 546a5685b5e..15fbcd7c1ea 100644 --- a/contrib/restricted/aws/aws-c-cal/source/der.c +++ b/contrib/restricted/aws/aws-c-cal/source/der.c @@ -36,11 +36,23 @@ struct der_tlv { uint8_t *value; }; -static void s_decode_tlv(struct der_tlv *tlv) { +static int s_decode_tlv(struct der_tlv *tlv) { if (tlv->tag == AWS_DER_INTEGER) { + if (tlv->length == 0) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + uint8_t first_byte = tlv->value[0]; - /* if the first byte is 0, it just denotes unsigned and should be removed */ - if (first_byte == 0x00) { + if (first_byte & 0x80) { + return aws_raise_error(AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT); + } + + /* if its multibyte int and first byte is 0, strip it since it was added + * to indicate to der that it is positive number. + * if len is 1 and first byte is 0, then the number is just zero, so + * leave it as is. 
+ */ + if (tlv->length > 1 && first_byte == 0x00) { tlv->length -= 1; tlv->value += 1; } @@ -49,6 +61,8 @@ static void s_decode_tlv(struct der_tlv *tlv) { tlv->length -= 1; tlv->value += 1; } + + return AWS_OP_SUCCESS; } static int s_der_read_tlv(struct aws_byte_cursor *cur, struct der_tlv *tlv) { @@ -56,10 +70,10 @@ static int s_der_read_tlv(struct aws_byte_cursor *cur, struct der_tlv *tlv) { uint8_t len_bytes = 0; uint32_t len = 0; if (!aws_byte_cursor_read_u8(cur, &tag)) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_byte_cursor_read_u8(cur, &len_bytes)) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } /* if the sign bit is set, then the first byte is the number of bytes required to store * the length */ @@ -88,10 +102,16 @@ static int s_der_read_tlv(struct aws_byte_cursor *cur, struct der_tlv *tlv) { len = len_bytes; } + if (len > cur->len) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + tlv->tag = tag; tlv->length = len; tlv->value = (tag == AWS_DER_NULL) ? NULL : cur->ptr; - s_decode_tlv(tlv); + if (s_decode_tlv(tlv)) { + return AWS_OP_ERR; + } aws_byte_cursor_advance(cur, len); return AWS_OP_SUCCESS; @@ -222,7 +242,7 @@ void aws_der_encoder_destroy(struct aws_der_encoder *encoder) { aws_mem_release(encoder->allocator, encoder); } -int aws_der_encoder_write_integer(struct aws_der_encoder *encoder, struct aws_byte_cursor integer) { +int aws_der_encoder_write_unsigned_integer(struct aws_der_encoder *encoder, struct aws_byte_cursor integer) { AWS_FATAL_ASSERT(integer.len <= UINT32_MAX); struct der_tlv tlv = { .tag = AWS_DER_INTEGER, @@ -391,12 +411,13 @@ int s_parse_cursor(struct aws_der_decoder *decoder, struct aws_byte_cursor cur) while (cur.len) { struct der_tlv tlv = {0}; if (s_der_read_tlv(&cur, &tlv)) { - return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + return AWS_OP_ERR; } /* skip trailing newlines in the stream after any TLV */ while (cur.len && *cur.ptr == '\n') { aws_byte_cursor_advance(&cur, 1); } + if (aws_array_list_push_back(&decoder->tlvs, &tlv)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } @@ -472,7 +493,7 @@ int aws_der_decoder_tlv_string(struct aws_der_decoder *decoder, struct aws_byte_ return AWS_OP_SUCCESS; } -int aws_der_decoder_tlv_integer(struct aws_der_decoder *decoder, struct aws_byte_cursor *integer) { +int aws_der_decoder_tlv_unsigned_integer(struct aws_der_decoder *decoder, struct aws_byte_cursor *integer) { struct der_tlv tlv = s_decoder_tlv(decoder); if (tlv.tag != AWS_DER_INTEGER) { return aws_raise_error(AWS_ERROR_CAL_MISMATCHED_DER_TYPE); diff --git a/contrib/restricted/aws/aws-c-cal/source/hash.c b/contrib/restricted/aws/aws-c-cal/source/hash.c index 37891277323..f6fbd3af593 100644 --- a/contrib/restricted/aws/aws-c-cal/source/hash.c +++ b/contrib/restricted/aws/aws-c-cal/source/hash.c @@ -87,7 +87,7 @@ static inline int compute_hash( struct aws_byte_buf *output, size_t truncate_to) { if (!hash) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (aws_hash_update(hash, input)) { diff --git a/contrib/restricted/aws/aws-c-cal/source/rsa.c b/contrib/restricted/aws/aws-c-cal/source/rsa.c new file mode 100644 index 00000000000..f24107176f6 --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/source/rsa.c @@ -0,0 +1,282 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/cal/private/rsa.h> + +#include <aws/cal/cal.h> +#include <aws/cal/hash.h> +#include <aws/cal/private/der.h> + +typedef struct aws_rsa_key_pair *( + aws_rsa_key_pair_new_from_public_pkcs1_fn)(struct aws_allocator *allocator, struct aws_byte_cursor public_key); + +typedef struct aws_rsa_key_pair *( + aws_rsa_key_pair_new_from_private_pkcs1_fn)(struct aws_allocator *allocator, struct aws_byte_cursor private_key); + +#ifndef BYO_CRYPTO + +extern struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor public_key); + +extern struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor private_key); + +#else /* BYO_CRYPTO */ + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor public_key) { + (void)allocator; + (void)public_key; + abort(); +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor private_key) { + (void)allocator; + (void)private_key; + abort(); +} +#endif /* BYO_CRYPTO */ + +static aws_rsa_key_pair_new_from_public_pkcs1_fn *s_rsa_key_pair_new_from_public_key_pkcs1_fn = + aws_rsa_key_pair_new_from_public_key_pkcs1_impl; + +static aws_rsa_key_pair_new_from_private_pkcs1_fn *s_rsa_key_pair_new_from_private_key_pkcs1_fn = + aws_rsa_key_pair_new_from_private_key_pkcs1_impl; + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( + struct aws_allocator *allocator, + struct aws_byte_cursor public_key) { + return s_rsa_key_pair_new_from_public_key_pkcs1_fn(allocator, public_key); +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( + struct aws_allocator *allocator, + struct aws_byte_cursor private_key) { + return s_rsa_key_pair_new_from_private_key_pkcs1_fn(allocator, private_key); +} + +void aws_rsa_key_pair_base_clean_up(struct aws_rsa_key_pair *key_pair) { + aws_byte_buf_clean_up_secure(&key_pair->priv); + aws_byte_buf_clean_up_secure(&key_pair->pub); +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair) { + return aws_ref_count_acquire(&key_pair->ref_count); +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair) { + if (key_pair != NULL) { + aws_ref_count_release(&key_pair->ref_count); + } + return NULL; +} + +size_t aws_rsa_key_pair_max_encrypt_plaintext_size( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm) { + /* + * Per rfc8017, max size of plaintext for encrypt operation is as follows: + * PKCS1-v1_5: (key size in bytes) - 11 + * OAEP: (key size in bytes) - 2 * (hash bytes) - 2 + */ + + size_t key_size_in_bytes = key_pair->key_size_in_bits / 8; + switch (algorithm) { + case AWS_CAL_RSA_ENCRYPTION_PKCS1_5: + return key_size_in_bytes - 11; + case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256: + return key_size_in_bytes - 2 * (256 / 8) - 2; + case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512: + return key_size_in_bytes - 2 * (512 / 8) - 2; + default: + AWS_FATAL_ASSERT("Unsupported RSA Encryption Algorithm"); + } + + return 0; +} + +int aws_rsa_key_pair_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out) { + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + if 
(AWS_UNLIKELY(aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) < plaintext.len)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unexpected buffer size. For RSA, ciphertext must not exceed block size"); + return aws_raise_error(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM); + } + + return key_pair->vtable->encrypt(key_pair, algorithm, plaintext, out); +} + +AWS_CAL_API int aws_rsa_key_pair_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out) { + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + if (AWS_UNLIKELY(ciphertext.len != (key_pair->key_size_in_bits / 8))) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unexpected buffer size. For RSA, ciphertext is expected to match block size."); + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return key_pair->vtable->decrypt(key_pair, algorithm, ciphertext, out); +} + +int aws_rsa_key_pair_sign_message( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out) { + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + AWS_FATAL_ASSERT( + algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 || algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256); + + if (digest.len > AWS_SHA256_LEN) { + AWS_LOGF_ERROR( + AWS_LS_CAL_RSA, "Unexpected digest size. For RSA, digest length is bound by max size of hash function"); + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return key_pair->vtable->sign(key_pair, algorithm, digest, out); +} + +int aws_rsa_key_pair_verify_signature( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature) { + AWS_PRECONDITION(key_pair); + + return key_pair->vtable->verify(key_pair, algorithm, digest, signature); +} + +size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair) { + AWS_PRECONDITION(key_pair); + return key_pair->key_size_in_bits / 8; +} + +size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair) { + AWS_PRECONDITION(key_pair); + return key_pair->key_size_in_bits / 8; +} + +int aws_rsa_key_pair_get_public_key( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_key_export_format format, + struct aws_byte_buf *out) { + (void)format; /* ignore format for now, since only pkcs1 is supported. */ + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + if (key_pair->pub.len == 0) { + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + } + + aws_byte_buf_init_copy(out, key_pair->allocator, &key_pair->pub); + return AWS_OP_SUCCESS; +} + +int aws_rsa_key_pair_get_private_key( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_key_export_format format, + struct aws_byte_buf *out) { + (void)format; /* ignore format for now, since only pkcs1 is supported. 
*/ + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + if (key_pair->priv.len == 0) { + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + } + + aws_byte_buf_init_copy(out, key_pair->allocator, &key_pair->priv); + return AWS_OP_SUCCESS; +} + +int aws_der_decoder_load_private_rsa_pkcs1(struct aws_der_decoder *decoder, struct aws_rsa_private_key_pkcs1 *out) { + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_SEQUENCE) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + struct aws_byte_cursor version_cur; + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &version_cur)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (version_cur.len != 1 || version_cur.ptr[0] != 0) { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT); + } + out->version = 0; + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &(out->modulus))) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->publicExponent)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->privateExponent)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->prime1)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->prime2)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->exponent1)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->exponent2)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->coefficient)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + return AWS_OP_SUCCESS; +} + +int aws_der_decoder_load_public_rsa_pkcs1(struct aws_der_decoder *decoder, struct aws_rsa_public_key_pkcs1 *out) { + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_SEQUENCE) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &(out->modulus))) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->publicExponent)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + return AWS_OP_SUCCESS; +} + +int is_valid_rsa_key_size(size_t key_size_in_bits) { + if (key_size_in_bits < AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS || + key_size_in_bits > AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS || key_size_in_bits % 8 != 0) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return AWS_OP_SUCCESS; +} diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c index 78ba7a9ee86..2c6c796af82 100644 --- 
a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c +++ b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c @@ -4,6 +4,7 @@ */ #include <aws/cal/private/symmetric_cipher_priv.h> +#define OPENSSL_SUPPRESS_DEPRECATED #include <openssl/evp.h> struct openssl_aes_cipher { diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c index 9a1d43e3d58..60c26af9dd6 100644 --- a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c +++ b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c @@ -13,6 +13,15 @@ #include <aws/cal/private/opensslcrypto_common.h> +/* + * OpenSSL 3 has a large amount of interface changes and many of the functions used + * throughout aws-c-cal have become deprecated. + * Lets disable deprecation warnings, so that we can atleast run CI, until we + * can move over to new functions. + */ +#define OPENSSL_SUPPRESS_DEPRECATED +#include <openssl/crypto.h> + static struct openssl_hmac_ctx_table hmac_ctx_table; static struct openssl_evp_md_ctx_table evp_md_ctx_table; @@ -21,23 +30,35 @@ struct openssl_evp_md_ctx_table *g_aws_openssl_evp_md_ctx_table = NULL; static struct aws_allocator *s_libcrypto_allocator = NULL; +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) +# define OPENSSL_IS_OPENSSL +#endif + /* weak refs to libcrypto functions to force them to at least try to link * and avoid dead-stripping */ -#if defined(OPENSSL_IS_AWSLC) +#if defined(OPENSSL_IS_AWSLC) || defined(OPENSSL_IS_BORINGSSL) extern HMAC_CTX *HMAC_CTX_new(void) __attribute__((weak, used)); extern void HMAC_CTX_free(HMAC_CTX *) __attribute__((weak, used)); -extern void HMAC_CTX_reset(HMAC_CTX *) __attribute__((weak, used)); extern void HMAC_CTX_init(HMAC_CTX *) __attribute__((weak, used)); extern void HMAC_CTX_cleanup(HMAC_CTX *) __attribute__((weak, used)); extern int HMAC_Update(HMAC_CTX *, const unsigned char *, size_t) __attribute__((weak, used)); extern int HMAC_Final(HMAC_CTX *, unsigned char *, unsigned int *) __attribute__((weak, used)); extern int HMAC_Init_ex(HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *) __attribute__((weak, used)); + +static int s_hmac_init_ex_bssl(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl) { + AWS_PRECONDITION(ctx); + + int (*init_ex_pt)(HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *) = (int (*)( + HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *))g_aws_openssl_hmac_ctx_table->impl.init_ex_fn; + + return init_ex_pt(ctx, key, key_len, md, impl); +} + #else /* 1.1 */ extern HMAC_CTX *HMAC_CTX_new(void) __attribute__((weak, used)); extern void HMAC_CTX_free(HMAC_CTX *) __attribute__((weak, used)); -extern int HMAC_CTX_reset(HMAC_CTX *) __attribute__((weak, used)); /* 1.0.2 */ extern void HMAC_CTX_init(HMAC_CTX *) __attribute__((weak, used)); @@ -48,6 +69,23 @@ extern int HMAC_Update(HMAC_CTX *, const unsigned char *, size_t) __attribute__( extern int HMAC_Final(HMAC_CTX *, unsigned char *, unsigned int *) __attribute__((weak, used)); extern int HMAC_Init_ex(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *) __attribute__((weak, used)); +static int s_hmac_init_ex_openssl(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl) { + AWS_PRECONDITION(ctx); + if (key_len > INT_MAX) { + return 0; + } + + /*Note: unlike aws-lc and boringssl, openssl 1.1.1 and 1.0.2 take int as key + len arg. 
*/ + int (*init_ex_ptr)(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *) = + (int (*)(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *))g_aws_openssl_hmac_ctx_table->impl.init_ex_fn; + + return init_ex_ptr(ctx, key, (int)key_len, md, impl); +} + +#endif /* !OPENSSL_IS_AWSLC && !OPENSSL_IS_BORINGSSL*/ + +#if !defined(OPENSSL_IS_AWSLC) /* libcrypto 1.1 stub for init */ static void s_hmac_ctx_init_noop(HMAC_CTX *ctx) { (void)ctx; @@ -57,7 +95,9 @@ static void s_hmac_ctx_init_noop(HMAC_CTX *ctx) { static void s_hmac_ctx_clean_up_noop(HMAC_CTX *ctx) { (void)ctx; } +#endif +#if defined(OPENSSL_IS_OPENSSL) /* libcrypto 1.0 shim for new */ static HMAC_CTX *s_hmac_ctx_new(void) { AWS_PRECONDITION( @@ -79,18 +119,6 @@ static void s_hmac_ctx_free(HMAC_CTX *ctx) { aws_mem_release(s_libcrypto_allocator, ctx); } -/* libcrypto 1.0 shim for reset, matches HMAC_CTX_reset semantics */ -static int s_hmac_ctx_reset(HMAC_CTX *ctx) { - AWS_PRECONDITION(ctx); - AWS_PRECONDITION( - g_aws_openssl_hmac_ctx_table->init_fn != s_hmac_ctx_init_noop && - g_aws_openssl_hmac_ctx_table->clean_up_fn != s_hmac_ctx_clean_up_noop && - "libcrypto 1.0 reset called on libcrypto 1.1 vtable"); - g_aws_openssl_hmac_ctx_table->clean_up_fn(ctx); - g_aws_openssl_hmac_ctx_table->init_fn(ctx); - return 1; -} - #endif /* !OPENSSL_IS_AWSLC */ enum aws_libcrypto_version { @@ -98,15 +126,16 @@ enum aws_libcrypto_version { AWS_LIBCRYPTO_1_0_2, AWS_LIBCRYPTO_1_1_1, AWS_LIBCRYPTO_LC, -} s_libcrypto_version = AWS_LIBCRYPTO_NONE; + AWS_LIBCRYPTO_BORINGSSL +}; bool s_resolve_hmac_102(void *module) { -#if !defined(OPENSSL_IS_AWSLC) +#if defined(OPENSSL_IS_OPENSSL) hmac_ctx_init init_fn = (hmac_ctx_init)HMAC_CTX_init; hmac_ctx_clean_up clean_up_fn = (hmac_ctx_clean_up)HMAC_CTX_cleanup; - hmac_ctx_update update_fn = (hmac_ctx_update)HMAC_Update; - hmac_ctx_final final_fn = (hmac_ctx_final)HMAC_Final; - hmac_ctx_init_ex init_ex_fn = (hmac_ctx_init_ex)HMAC_Init_ex; + hmac_update update_fn = (hmac_update)HMAC_Update; + hmac_final final_fn = (hmac_final)HMAC_Final; + hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? */ bool has_102_symbols = init_fn && clean_up_fn && update_fn && final_fn && init_ex_fn; @@ -126,7 +155,6 @@ bool s_resolve_hmac_102(void *module) { if (init_fn) { hmac_ctx_table.new_fn = (hmac_ctx_new)s_hmac_ctx_new; - hmac_ctx_table.reset_fn = (hmac_ctx_reset)s_hmac_ctx_reset; hmac_ctx_table.free_fn = s_hmac_ctx_free; hmac_ctx_table.init_fn = init_fn; hmac_ctx_table.clean_up_fn = clean_up_fn; @@ -141,22 +169,20 @@ bool s_resolve_hmac_102(void *module) { } bool s_resolve_hmac_111(void *module) { -#if !defined(OPENSSL_IS_AWSLC) +#if defined(OPENSSL_IS_OPENSSL) hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; - hmac_ctx_reset reset_fn = (hmac_ctx_reset)HMAC_CTX_reset; - hmac_ctx_update update_fn = (hmac_ctx_update)HMAC_Update; - hmac_ctx_final final_fn = (hmac_ctx_final)HMAC_Final; - hmac_ctx_init_ex init_ex_fn = (hmac_ctx_init_ex)HMAC_Init_ex; + hmac_update update_fn = (hmac_update)HMAC_Update; + hmac_final final_fn = (hmac_final)HMAC_Final; + hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? 
*/ - bool has_111_symbols = new_fn && free_fn && update_fn && final_fn && init_ex_fn && reset_fn; + bool has_111_symbols = new_fn && free_fn && update_fn && final_fn && init_ex_fn; if (has_111_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static libcrypto 1.1.1 HMAC symbols"); } else { *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); - *(void **)(&reset_fn) = dlsym(module, "HMAC_CTX_reset"); *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); @@ -168,13 +194,13 @@ bool s_resolve_hmac_111(void *module) { if (new_fn) { hmac_ctx_table.new_fn = new_fn; - hmac_ctx_table.reset_fn = reset_fn; hmac_ctx_table.free_fn = free_fn; hmac_ctx_table.init_fn = s_hmac_ctx_init_noop; hmac_ctx_table.clean_up_fn = s_hmac_ctx_clean_up_noop; hmac_ctx_table.update_fn = update_fn; hmac_ctx_table.final_fn = final_fn; - hmac_ctx_table.init_ex_fn = init_ex_fn; + hmac_ctx_table.init_ex_fn = s_hmac_init_ex_openssl; + hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; return true; } @@ -188,13 +214,12 @@ bool s_resolve_hmac_lc(void *module) { hmac_ctx_clean_up clean_up_fn = (hmac_ctx_clean_up)HMAC_CTX_cleanup; hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; - hmac_ctx_reset reset_fn = (hmac_ctx_reset)HMAC_CTX_reset; - hmac_ctx_update update_fn = (hmac_ctx_update)HMAC_Update; - hmac_ctx_final final_fn = (hmac_ctx_final)HMAC_Final; - hmac_ctx_init_ex init_ex_fn = (hmac_ctx_init_ex)HMAC_Init_ex; + hmac_update update_fn = (hmac_update)HMAC_Update; + hmac_final final_fn = (hmac_final)HMAC_Final; + hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? 
*/ - bool has_awslc_symbols = new_fn && free_fn && update_fn && final_fn && init_fn && init_ex_fn && reset_fn; + bool has_awslc_symbols = new_fn && free_fn && update_fn && final_fn && init_fn && init_ex_fn; /* If symbols aren't already found, try to find the requested version */ /* when built as a shared lib, and multiple versions of libcrypto are possibly @@ -203,7 +228,6 @@ bool s_resolve_hmac_lc(void *module) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static aws-lc HMAC symbols"); } else { *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); - *(void **)(&reset_fn) = dlsym(module, "HMAC_CTX_reset"); *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); @@ -216,13 +240,53 @@ bool s_resolve_hmac_lc(void *module) { if (new_fn) { /* Fill out the vtable for the requested version */ hmac_ctx_table.new_fn = new_fn; - hmac_ctx_table.reset_fn = reset_fn; hmac_ctx_table.free_fn = free_fn; hmac_ctx_table.init_fn = init_fn; hmac_ctx_table.clean_up_fn = clean_up_fn; hmac_ctx_table.update_fn = update_fn; hmac_ctx_table.final_fn = final_fn; - hmac_ctx_table.init_ex_fn = init_ex_fn; + hmac_ctx_table.init_ex_fn = s_hmac_init_ex_bssl; + hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; + g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; + return true; + } +#endif + return false; +} + +bool s_resolve_hmac_boringssl(void *module) { +#if defined(OPENSSL_IS_BORINGSSL) + hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; + hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; + hmac_update update_fn = (hmac_update)HMAC_Update; + hmac_final final_fn = (hmac_final)HMAC_Final; + hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; + + /* were symbols bound by static linking? */ + bool has_bssl_symbols = new_fn && free_fn && update_fn && final_fn && init_ex_fn; + + if (has_bssl_symbols) { + AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static boringssl HMAC symbols"); + } else { + *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); + *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); + *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); + *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); + *(void **)(&init_ex_fn) = dlsym(module, "HMAC_Init_ex"); + if (new_fn) { + AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic boringssl HMAC symbols"); + } + } + + if (new_fn) { + hmac_ctx_table.new_fn = new_fn; + hmac_ctx_table.free_fn = free_fn; + hmac_ctx_table.init_fn = s_hmac_ctx_init_noop; + hmac_ctx_table.clean_up_fn = s_hmac_ctx_clean_up_noop; + hmac_ctx_table.update_fn = update_fn; + hmac_ctx_table.final_fn = final_fn; + hmac_ctx_table.init_ex_fn = s_hmac_init_ex_bssl; + hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; return true; } @@ -238,6 +302,8 @@ static enum aws_libcrypto_version s_resolve_libcrypto_hmac(enum aws_libcrypto_ve return s_resolve_hmac_111(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_1_0_2: return s_resolve_hmac_102(module) ? version : AWS_LIBCRYPTO_NONE; + case AWS_LIBCRYPTO_BORINGSSL: + return s_resolve_hmac_boringssl(module) ? 
version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_NONE: AWS_FATAL_ASSERT(!"Attempted to resolve invalid libcrypto HMAC API version AWS_LIBCRYPTO_NONE"); } @@ -386,6 +452,14 @@ bool s_resolve_md_lc(void *module) { return false; } +bool s_resolve_md_boringssl(void *module) { +#if !defined(OPENSSL_IS_AWSLC) + return s_resolve_md_111(module); +#else + return false; +#endif +} + static enum aws_libcrypto_version s_resolve_libcrypto_md(enum aws_libcrypto_version version, void *module) { switch (version) { case AWS_LIBCRYPTO_LC: @@ -394,6 +468,8 @@ static enum aws_libcrypto_version s_resolve_libcrypto_md(enum aws_libcrypto_vers return s_resolve_md_111(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_1_0_2: return s_resolve_md_102(module) ? version : AWS_LIBCRYPTO_NONE; + case AWS_LIBCRYPTO_BORINGSSL: + return s_resolve_md_boringssl(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_NONE: AWS_FATAL_ASSERT(!"Attempted to resolve invalid libcrypto MD API version AWS_LIBCRYPTO_NONE"); } @@ -479,31 +555,26 @@ static enum aws_libcrypto_version s_resolve_libcrypto_lib(void) { return AWS_LIBCRYPTO_NONE; } -static void *s_libcrypto_module = NULL; - static enum aws_libcrypto_version s_resolve_libcrypto(void) { - if (s_libcrypto_version != AWS_LIBCRYPTO_NONE) { - return s_libcrypto_version; - } - /* Try to auto-resolve against what's linked in/process space */ AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "searching process and loaded modules"); void *process = dlopen(NULL, RTLD_NOW); -#if 0 - // dlopen is not supported in musl. It's ok to pass NULL to s_resolve_libcrypto_symbols, - // as dlsym handles it well according to man. - AWS_FATAL_ASSERT(process && "Unable to load symbols from process space"); -#endif enum aws_libcrypto_version result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_LC, process); if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find aws-lc symbols linked"); + result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_BORINGSSL, process); + } + if (result == AWS_LIBCRYPTO_NONE) { + AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find boringssl symbols linked"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_0_2, process); } if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find libcrypto 1.0.2 symbols linked"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_1_1, process); } - dlclose(process); + if (process) { + dlclose(process); + } if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find libcrypto 1.1.1 symbols linked"); @@ -523,7 +594,7 @@ static enum aws_libcrypto_version s_resolve_libcrypto(void) { #endif /* Openssl 1.0.x requires special handling for its locking callbacks or else it's not thread safe */ -#if !defined(OPENSSL_IS_AWSLC) +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) static struct aws_mutex *s_libcrypto_locks = NULL; static void s_locking_fn(int mode, int n, const char *unused0, int unused1) { @@ -550,7 +621,7 @@ void aws_cal_platform_init(struct aws_allocator *allocator) { s_libcrypto_allocator = allocator; -#if !defined(OPENSSL_IS_AWSLC) +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) /* Ensure that libcrypto 1.0.2 has working locking mechanisms. 
This code is macro'ed * by libcrypto to be a no-op on 1.1.1 */ if (!CRYPTO_get_locking_callback()) { @@ -572,8 +643,37 @@ void aws_cal_platform_init(struct aws_allocator *allocator) { #endif } +/* + * Shut down any resources before unloading CRT (ex. dlclose). + * This is currently aws-lc specific. + * Ex. why we need it: + * aws-lc uses thread local data extensively and registers thread atexit + * callback to clean it up. + * There are cases where crt gets dlopen'ed and then dlclose'ed within a larger program + * (ex. nodejs workers). + * With glibc, dlclose actually removes symbols from global space (musl does not). + * Once crt is unloaded, thread atexit will no longer point at a valid aws-lc + * symbol and will happily crash when thread is closed. + * AWSLC_thread_local_shutdown was added by aws-lc to let teams remove thread + * local data manually before lib is unloaded. + * We can't call AWSLC_thread_local_shutdown in cal cleanup because it renders + * aws-lc unusable and there is no way to reinitialize aws-lc to a working state, + * i.e. everything that depends on aws-lc stops working after shutdown (ex. curl). + * So instead rely on GCC/Clang destructor extension to shut down right before + * crt gets unloaded. Does not work on msvc, but that's a bridge we can cross at + * a later date (since we don't support aws-lc on win right now). + * TODO: do an already-init'ed check on lc similar to what we do for s2n, so we + * only shut down when we initialized aws-lc. Currently not possible because + * there is no way to check that aws-lc has been initialized. + */ +void __attribute__((destructor)) s_cal_crypto_shutdown(void) { +#if defined(OPENSSL_IS_AWSLC) + AWSLC_thread_local_shutdown(); +#endif +} + void aws_cal_platform_clean_up(void) { -#if !defined(OPENSSL_IS_AWSLC) +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) if (CRYPTO_get_locking_callback() == s_locking_fn) { CRYPTO_set_locking_callback(NULL); size_t lock_count = (size_t)CRYPTO_num_locks(); @@ -588,12 +688,19 @@ void aws_cal_platform_clean_up(void) { } #endif - if (s_libcrypto_module) { - dlclose(s_libcrypto_module); - } +#if defined(OPENSSL_IS_AWSLC) + AWSLC_thread_local_clear(); +#endif s_libcrypto_allocator = NULL; } + +void aws_cal_platform_thread_clean_up(void) { +#if defined(OPENSSL_IS_AWSLC) + AWSLC_thread_local_clear(); +#endif +} + #if !defined(__GNUC__) || (__GNUC__ >= 4 && __GNUC_MINOR__ > 1) # pragma GCC diagnostic pop #endif diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_rsa.c b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_rsa.c new file mode 100644 index 00000000000..9d891677558 --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_rsa.c @@ -0,0 +1,450 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/cal/private/opensslcrypto_common.h> +#include <aws/cal/private/rsa.h> + +#include <aws/cal/cal.h> +#include <aws/common/encoding.h> + +#define OPENSSL_SUPPRESS_DEPRECATED +#include <openssl/err.h> +#include <openssl/evp.h> + +#if defined(OPENSSL_IS_OPENSSL) +/*Error defines were part of evp.h in 1.0.x and were moved to evperr.h in 1.1.0*/ +# if OPENSSL_VERSION_NUMBER >= 0x10100000L +# include <openssl/evperr.h> +# endif +#else +# error #include <openssl/evp_errors.h> +#endif + +#include <openssl/rsa.h> + +struct lc_rsa_key_pair { + struct aws_rsa_key_pair base; + EVP_PKEY *key; +}; + +static void s_rsa_destroy_key(void *key_pair) { + if (key_pair == NULL) { + return; + } + + struct aws_rsa_key_pair *base = key_pair; + struct lc_rsa_key_pair *impl = base->impl; + + if (impl->key != NULL) { + EVP_PKEY_free(impl->key); + } + + aws_rsa_key_pair_base_clean_up(base); + + aws_mem_release(base->allocator, impl); +} + +/* + * Transforms evp error code into crt error code and raises it as necessary. + * All evp functions follow the same: + * >= 1 for success + * <= 0 for failure + * -2 always indicates incorrect algo for operation + */ +static int s_reinterpret_evp_error_as_crt(int evp_error, const char *function_name) { + if (evp_error > 0) { + return AWS_OP_SUCCESS; + } + + /* AWS-LC/BoringSSL error code is uint32_t, but OpenSSL uses unsigned long. */ +#if defined(OPENSSL_IS_OPENSSL) + uint32_t error = ERR_peek_error(); +#else + unsigned long error = ERR_peek_error(); +#endif + + int crt_error = AWS_OP_ERR; + const char *error_message = ERR_reason_error_string(error); + + if (evp_error == -2) { + crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; + goto on_error; + } + + if (ERR_GET_LIB(error) == ERR_LIB_EVP) { + switch (ERR_GET_REASON(error)) { + case EVP_R_BUFFER_TOO_SMALL: { + crt_error = AWS_ERROR_SHORT_BUFFER; + goto on_error; + } + case EVP_R_UNSUPPORTED_ALGORITHM: { + crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; + goto on_error; + } + } + } + + crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED; + +on_error: + AWS_LOGF_ERROR( + AWS_LS_CAL_RSA, + "%s() failed. returned: %d extended error:%lu(%s) aws_error:%s", + function_name, + evp_error, + (unsigned long)error, + error_message == NULL ? "" : error_message, + aws_error_name(crt_error)); + + return aws_raise_error(crt_error); +} + +static int s_set_encryption_ctx_from_algo(EVP_PKEY_CTX *ctx, enum aws_rsa_encryption_algorithm algorithm) { + if (algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5) { + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { + return AWS_OP_ERR; + } + + } else if (algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 || algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512) { + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { + return AWS_OP_ERR; + } + + const EVP_MD *md = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? 
EVP_sha256() : EVP_sha512(); + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_CTX_set_rsa_oaep_md(ctx, md), "EVP_PKEY_CTX_set_rsa_oaep_md")) { + return AWS_OP_ERR; + } + } else { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + return AWS_OP_SUCCESS; +} + +static int s_rsa_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out) { + struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; + + EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); + if (ctx == NULL) { + return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + } + + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_encrypt_init(ctx), "EVP_PKEY_encrypt_init")) { + goto on_error; + } + + if (s_set_encryption_ctx_from_algo(ctx, algorithm)) { + goto on_error; + } + + size_t needed_buffer_len = 0; + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_encrypt(ctx, NULL, &needed_buffer_len, plaintext.ptr, plaintext.len), + "EVP_PKEY_encrypt get length")) { + goto on_error; + } + + size_t ct_len = out->capacity - out->len; + if (needed_buffer_len > ct_len) { + /* + * OpenSSL 3 seems to no longer fail if the buffer is too short. + * Instead it seems to write out enough data to fill the buffer and then + * updates out_len to the full buffer size. It does not seem to corrupt + * memory after the buffer, but the behavior is non-ideal. + * So get the length needed for the buffer from the API first and then manually ensure that + * the buffer we have is big enough. + */ + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_encrypt(ctx, out->buffer + out->len, &ct_len, plaintext.ptr, plaintext.len), "EVP_PKEY_encrypt")) { + goto on_error; + } + out->len += ct_len; + + EVP_PKEY_CTX_free(ctx); + return AWS_OP_SUCCESS; + +on_error: + EVP_PKEY_CTX_free(ctx); + return AWS_OP_ERR; +} + +static int s_rsa_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out) { + struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; + + EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); + if (ctx == NULL) { + return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + } + + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_decrypt_init(ctx), "EVP_PKEY_decrypt_init")) { + goto on_error; + } + + if (s_set_encryption_ctx_from_algo(ctx, algorithm)) { + goto on_error; + } + + size_t needed_buffer_len = 0; + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_decrypt(ctx, NULL, &needed_buffer_len, ciphertext.ptr, ciphertext.len), + "EVP_PKEY_decrypt get length")) { + goto on_error; + } + + size_t ct_len = out->capacity - out->len; + if (needed_buffer_len > ct_len) { + /* + * Manual short buffer length check for OpenSSL 3. 
+ * refer to encrypt implementation for more details + */ + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_decrypt(ctx, out->buffer + out->len, &ct_len, ciphertext.ptr, ciphertext.len), + "EVP_PKEY_decrypt")) { + goto on_error; + } + out->len += ct_len; + + EVP_PKEY_CTX_free(ctx); + return AWS_OP_SUCCESS; + +on_error: + EVP_PKEY_CTX_free(ctx); + return AWS_OP_ERR; +} + +static int s_set_signature_ctx_from_algo(EVP_PKEY_CTX *ctx, enum aws_rsa_signature_algorithm algorithm) { + if (algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256) { + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { + return AWS_OP_ERR; + } + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()), "EVP_PKEY_CTX_set_signature_md")) { + return AWS_OP_ERR; + } + } else if (algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256) { + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { + return AWS_OP_ERR; + } + +#if defined(OPENSSL_IS_BORINGSSL) || OPENSSL_VERSION_NUMBER < 0x10100000L + int saltlen = -1; /* RSA_PSS_SALTLEN_DIGEST not defined in BoringSSL and old versions of openssl */ +#else + int saltlen = RSA_PSS_SALTLEN_DIGEST; +#endif + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, saltlen), "EVP_PKEY_CTX_set_rsa_pss_saltlen")) { + return AWS_OP_ERR; + } + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()), "EVP_PKEY_CTX_set_signature_md")) { + return AWS_OP_ERR; + } + } else { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + return AWS_OP_SUCCESS; +} + +static int s_rsa_sign( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out) { + struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; + + EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); + if (ctx == NULL) { + return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + } + + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_sign_init(ctx), "EVP_PKEY_sign_init")) { + goto on_error; + } + + if (s_set_signature_ctx_from_algo(ctx, algorithm)) { + goto on_error; + } + + size_t needed_buffer_len = 0; + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_sign(ctx, NULL, &needed_buffer_len, digest.ptr, digest.len), "EVP_PKEY_sign get length")) { + goto on_error; + } + + size_t ct_len = out->capacity - out->len; + if (needed_buffer_len > ct_len) { + /* + * manual short buffer length check for OpenSSL 3. + * refer to encrypt implementation for more details. + * OpenSSL3 actually does throw an error here, but error code comes from + * component that does not exist in OpenSSL 1.x. So check manually right + * now and we can figure out how to handle it better, once we can + * properly support OpenSSL 3. 
+ */ + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_sign(ctx, out->buffer + out->len, &ct_len, digest.ptr, digest.len), "EVP_PKEY_sign")) { + goto on_error; + } + out->len += ct_len; + + EVP_PKEY_CTX_free(ctx); + return AWS_OP_SUCCESS; + +on_error: + EVP_PKEY_CTX_free(ctx); + return AWS_OP_ERR; +} + +static int s_rsa_verify( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature) { + struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; + + EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); + if (ctx == NULL) { + return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + } + + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_verify_init(ctx), "EVP_PKEY_verify_init")) { + goto on_error; + } + + if (s_set_signature_ctx_from_algo(ctx, algorithm)) { + goto on_error; + } + + int error_code = EVP_PKEY_verify(ctx, signature.ptr, signature.len, digest.ptr, digest.len); + EVP_PKEY_CTX_free(ctx); + + /* Verify errors slightly differently from the rest of evp functions. + * 0 indicates signature does not pass verification, it's not necessarily an error. */ + if (error_code > 0) { + return AWS_OP_SUCCESS; + } else if (error_code == 0) { + return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); + } else { + return s_reinterpret_evp_error_as_crt(error_code, "EVP_PKEY_verify"); + } + +on_error: + EVP_PKEY_CTX_free(ctx); + return AWS_OP_ERR; +} + +static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { + .encrypt = s_rsa_encrypt, + .decrypt = s_rsa_decrypt, + .sign = s_rsa_sign, + .verify = s_rsa_verify, +}; + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + struct lc_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct lc_rsa_key_pair)); + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); + + RSA *rsa = NULL; + EVP_PKEY *private_key = NULL; + + if (d2i_RSAPrivateKey(&rsa, (const uint8_t **)&key.ptr, key.len) == NULL) { + aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + goto on_error; + } + + private_key = EVP_PKEY_new(); + if (private_key == NULL || EVP_PKEY_assign_RSA(private_key, rsa) == 0) { + RSA_free(rsa); + aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + goto on_error; + } + + key_pair_impl->key = private_key; + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + key_pair_impl->base.key_size_in_bits = EVP_PKEY_bits(key_pair_impl->key); + + return &key_pair_impl->base; + +on_error: + if (private_key) { + EVP_PKEY_free(private_key); + } + + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + struct lc_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct lc_rsa_key_pair)); + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); + + RSA *rsa = NULL; + EVP_PKEY *public_key = NULL; + 
+ if (d2i_RSAPublicKey(&rsa, (const uint8_t **)&key.ptr, key.len) == NULL) { + aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + goto on_error; + } + + public_key = EVP_PKEY_new(); + if (public_key == NULL || EVP_PKEY_assign_RSA(public_key, rsa) == 0) { + RSA_free(rsa); + aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + goto on_error; + } + + key_pair_impl->key = public_key; + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + key_pair_impl->base.key_size_in_bits = EVP_PKEY_bits(key_pair_impl->key); + + return &key_pair_impl->base; + +on_error: + if (public_key) { + EVP_PKEY_free(public_key); + } + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c b/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c index 931d705bd29..f8d33316ea8 100644 --- a/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c +++ b/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c @@ -7,6 +7,7 @@ #include <aws/cal/cal.h> #include <aws/cal/private/der.h> +#define OPENSSL_SUPPRESS_DEPRECATED #include <openssl/bn.h> #include <openssl/ec.h> #include <openssl/ecdsa.h> diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hmac.c b/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hmac.c index 5c5cc3686c7..732ead42a3f 100644 --- a/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hmac.c +++ b/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hmac.c @@ -73,7 +73,7 @@ struct aws_hmac *aws_sha256_hmac_default_new(struct aws_allocator *allocator, co hmac->impl = ctx; hmac->good = true; - if (!g_aws_openssl_hmac_ctx_table->init_ex_fn(ctx, secret->ptr, (int)secret->len, EVP_sha256(), NULL)) { + if (!g_aws_openssl_hmac_ctx_table->init_ex_fn(ctx, secret->ptr, secret->len, EVP_sha256(), NULL)) { s_destroy(hmac); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c index aeb646e66a3..0229de3a80d 100644 --- a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c +++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c @@ -277,9 +277,12 @@ static void s_clear_reusable_components(struct aws_symmetric_cipher *cipher) { } aws_byte_buf_secure_zero(&cipher_impl->overflow); - aws_byte_buf_secure_zero(&cipher_impl->working_mac_buffer); - /* windows handles this, just go ahead and tell the API it's got a length. */ - cipher_impl->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE; + + if (cipher_impl->working_mac_buffer.capacity != 0) { + aws_byte_buf_secure_zero(&cipher_impl->working_mac_buffer); + /* windows handles this, just go ahead and tell the API it's got a length. 
*/ + cipher_impl->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE; + } } static int s_reset_cbc_cipher(struct aws_symmetric_cipher *cipher) { diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c index a9e890d0556..268b29b5111 100644 --- a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c +++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c @@ -130,11 +130,11 @@ static int s_sign_message( struct aws_byte_cursor integer_cur = aws_byte_cursor_from_array(temp_signature_buf.buffer, coordinate_len); /* trim off the leading zero padding for DER encoding */ integer_cur = aws_byte_cursor_left_trim_pred(&integer_cur, s_trim_zeros_predicate); - aws_der_encoder_write_integer(encoder, integer_cur); + aws_der_encoder_write_unsigned_integer(encoder, integer_cur); integer_cur = aws_byte_cursor_from_array(temp_signature_buf.buffer + coordinate_len, coordinate_len); /* trim off the leading zero padding for DER encoding */ integer_cur = aws_byte_cursor_left_trim_pred(&integer_cur, s_trim_zeros_predicate); - aws_der_encoder_write_integer(encoder, integer_cur); + aws_der_encoder_write_unsigned_integer(encoder, integer_cur); aws_der_encoder_end_sequence(encoder); struct aws_byte_cursor signature_out_cur; @@ -178,8 +178,7 @@ static int s_append_coordinate( size_t leading_zero_count = coordinate_size - coordinate->len; AWS_FATAL_ASSERT(leading_zero_count + buffer->len <= buffer->capacity); - memset(buffer->buffer + buffer->len, 0, leading_zero_count); - buffer->len += leading_zero_count; + aws_byte_buf_write_u8_n(buffer, 0x0, leading_zero_count); } return aws_byte_buf_append(buffer, coordinate); @@ -216,7 +215,7 @@ static int s_verify_signature( /* there will be two coordinates. They need to be concatenated together. */ struct aws_byte_cursor coordinate; AWS_ZERO_STRUCT(coordinate); - if (aws_der_decoder_tlv_integer(decoder, &coordinate)) { + if (aws_der_decoder_tlv_unsigned_integer(decoder, &coordinate)) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } @@ -230,7 +229,7 @@ static int s_verify_signature( goto error; } AWS_ZERO_STRUCT(coordinate); - if (aws_der_decoder_tlv_integer(decoder, &coordinate)) { + if (aws_der_decoder_tlv_unsigned_integer(decoder, &coordinate)) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c index decedcdafa2..f2da2805673 100644 --- a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c +++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c @@ -10,3 +10,5 @@ void aws_cal_platform_init(struct aws_allocator *allocator) { } void aws_cal_platform_clean_up(void) {} + +void aws_cal_platform_thread_clean_up(void) {} diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_rsa.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_rsa.c new file mode 100644 index 00000000000..d9e7c8d229f --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_rsa.c @@ -0,0 +1,422 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/cal/private/rsa.h> + +#include <aws/cal/cal.h> +#include <aws/cal/private/der.h> +#include <aws/common/encoding.h> + +#define WIN32_NO_STATUS +#include <windows.h> +#undef WIN32_NO_STATUS + +#include <bcrypt.h> +#include <ntstatus.h> + +static BCRYPT_ALG_HANDLE s_rsa_alg = NULL; + +static aws_thread_once s_rsa_thread_once = AWS_THREAD_ONCE_STATIC_INIT; + +static void s_load_alg_handle(void *user_data) { + (void)user_data; + /* this function is incredibly slow, LET IT LEAK*/ + NTSTATUS status = BCryptOpenAlgorithmProvider(&s_rsa_alg, BCRYPT_RSA_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); + AWS_FATAL_ASSERT(s_rsa_alg && "BCryptOpenAlgorithmProvider() failed"); + AWS_FATAL_ASSERT(BCRYPT_SUCCESS(status)); +} + +struct bcrypt_rsa_key_pair { + struct aws_rsa_key_pair base; + BCRYPT_KEY_HANDLE key_handle; + struct aws_byte_buf key_buf; +}; + +static void s_rsa_destroy_key(void *key_pair) { + if (key_pair == NULL) { + return; + } + + struct aws_rsa_key_pair *base = key_pair; + struct bcrypt_rsa_key_pair *impl = base->impl; + + if (impl->key_handle) { + BCryptDestroyKey(impl->key_handle); + } + aws_byte_buf_clean_up_secure(&impl->key_buf); + + aws_rsa_key_pair_base_clean_up(base); + + aws_mem_release(base->allocator, impl); +} + +/* + * Transforms bcrypt error code into crt error code and raises it as necessary. + */ +static int s_reinterpret_bc_error_as_crt(NTSTATUS error, const char *function_name) { + if (BCRYPT_SUCCESS(error)) { + return AWS_OP_SUCCESS; + } + + int crt_error = AWS_OP_ERR; + switch (error) { + case STATUS_BUFFER_TOO_SMALL: { + crt_error = AWS_ERROR_SHORT_BUFFER; + goto on_error; + } + case STATUS_NOT_SUPPORTED: { + crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; + goto on_error; + } + } + + crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED; + +on_error: + AWS_LOGF_ERROR( + AWS_LS_CAL_RSA, "%s() failed. returned: %X aws_error:%s", function_name, error, aws_error_name(crt_error)); + + return aws_raise_error(crt_error); +} + +static int s_check_encryption_algorithm(enum aws_rsa_encryption_algorithm algorithm) { + if (algorithm != AWS_CAL_RSA_ENCRYPTION_PKCS1_5 && algorithm != AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 && + algorithm != AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512) { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + return AWS_OP_SUCCESS; +} + +static int s_rsa_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out) { + struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (s_check_encryption_algorithm(algorithm)) { + return AWS_OP_ERR; + } + + BCRYPT_OAEP_PADDING_INFO padding_info_oaep = { + .pszAlgId = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? BCRYPT_SHA256_ALGORITHM : BCRYPT_SHA512_ALGORITHM, + .pbLabel = NULL, + .cbLabel = 0}; + + ULONG length_written = 0; + NTSTATUS status = BCryptEncrypt( + key_pair_impl->key_handle, + plaintext.ptr, + (ULONG)plaintext.len, + algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? NULL : &padding_info_oaep, + NULL, + 0, + out->buffer + out->len, + (ULONG)(out->capacity - out->len), + &length_written, + algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? 
BCRYPT_PAD_PKCS1 : BCRYPT_PAD_OAEP); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptEncrypt")) { + return AWS_OP_ERR; + } + + out->len += length_written; + return AWS_OP_SUCCESS; +} + +static int s_rsa_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out) { + struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; + + /* There is a bug in old versions of BCryptDecrypt, where it does not return + * error status if out buffer is too short. So manually check that buffer is + * large enough. + */ + if ((out->capacity - out->len) < aws_rsa_key_pair_block_length(key_pair)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + if (s_check_encryption_algorithm(algorithm)) { + return AWS_OP_ERR; + } + + BCRYPT_OAEP_PADDING_INFO padding_info_oaep = { + .pszAlgId = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? BCRYPT_SHA256_ALGORITHM : BCRYPT_SHA512_ALGORITHM, + .pbLabel = NULL, + .cbLabel = 0}; + + ULONG length_written = 0; + NTSTATUS status = BCryptDecrypt( + key_pair_impl->key_handle, + ciphertext.ptr, + (ULONG)ciphertext.len, + algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? NULL : &padding_info_oaep, + NULL, + 0, + out->buffer + out->len, + (ULONG)(out->capacity - out->len), + &length_written, + algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_OAEP); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptDecrypt")) { + return AWS_OP_ERR; + } + + out->len += length_written; + return AWS_OP_SUCCESS; +} + +union sign_padding_info { + BCRYPT_PKCS1_PADDING_INFO pkcs1; + BCRYPT_PSS_PADDING_INFO pss; +}; + +static int s_sign_padding_info_init(union sign_padding_info *info, enum aws_rsa_signature_algorithm algorithm) { + memset(info, 0, sizeof(union sign_padding_info)); + + if (algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256) { + info->pkcs1.pszAlgId = BCRYPT_SHA256_ALGORITHM; + return AWS_OP_SUCCESS; + } else if (algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256) { + info->pss.pszAlgId = BCRYPT_SHA256_ALGORITHM; + info->pss.cbSalt = 32; + return AWS_OP_SUCCESS; + } + + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); +} + +static int s_rsa_sign( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out) { + struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; + + union sign_padding_info padding_info; + if (s_sign_padding_info_init(&padding_info, algorithm)) { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + ULONG length_written = 0; + NTSTATUS status = BCryptSignHash( + key_pair_impl->key_handle, + &padding_info, + digest.ptr, + (ULONG)digest.len, + out->buffer + out->len, + (ULONG)(out->capacity - out->len), + (ULONG *)&length_written, + algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_PSS); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptSignHash")) { + goto on_error; + } + + out->len += length_written; + + return AWS_OP_SUCCESS; + +on_error: + return AWS_OP_ERR; +} + +static int s_rsa_verify( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature) { + struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; + + /* BCrypt raises invalid argument if signature does not have correct size. 
+ * Verify size here and raise appropriate error and treat all other errors + * from BCrypt (including invalid arg) in reinterp. */ + if (signature.len != aws_rsa_key_pair_signature_length(key_pair)) { + return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); + } + + union sign_padding_info padding_info; + if (s_sign_padding_info_init(&padding_info, algorithm)) { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + /* okay, now we've got a windows compatible signature, let's verify it. */ + NTSTATUS status = BCryptVerifySignature( + key_pair_impl->key_handle, + &padding_info, + digest.ptr, + (ULONG)digest.len, + signature.ptr, + (ULONG)signature.len, + algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_PSS); + + if (status == STATUS_INVALID_SIGNATURE) { + return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); + } + + if (s_reinterpret_bc_error_as_crt(status, "BCryptVerifySignature")) { + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; +} + +static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { + .encrypt = s_rsa_encrypt, + .decrypt = s_rsa_decrypt, + .sign = s_rsa_sign, + .verify = s_rsa_verify, +}; + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + + aws_thread_call_once(&s_rsa_thread_once, s_load_alg_handle, NULL); + struct bcrypt_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_rsa_key_pair)); + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); + + struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, key); + + if (!decoder) { + goto on_error; + } + + struct aws_rsa_private_key_pkcs1 private_key_data; + AWS_ZERO_STRUCT(private_key_data); + if (aws_der_decoder_load_private_rsa_pkcs1(decoder, &private_key_data)) { + goto on_error; + } + + /* Hard to predict final blob size, so use pkcs1 key size as upper bound. 
*/ + size_t total_buffer_size = key.len + sizeof(BCRYPT_RSAKEY_BLOB); + + aws_byte_buf_init(&key_pair_impl->key_buf, allocator, total_buffer_size); + + BCRYPT_RSAKEY_BLOB key_blob; + AWS_ZERO_STRUCT(key_blob); + key_blob.Magic = BCRYPT_RSAFULLPRIVATE_MAGIC; + key_blob.BitLength = (ULONG)private_key_data.modulus.len * 8; + key_blob.cbPublicExp = (ULONG)private_key_data.publicExponent.len; + key_blob.cbModulus = (ULONG)private_key_data.modulus.len; + key_blob.cbPrime1 = (ULONG)private_key_data.prime1.len; + key_blob.cbPrime2 = (ULONG)private_key_data.prime2.len; + + struct aws_byte_cursor header = aws_byte_cursor_from_array(&key_blob, sizeof(key_blob)); + aws_byte_buf_append(&key_pair_impl->key_buf, &header); + + LPCWSTR blob_type = BCRYPT_RSAFULLPRIVATE_BLOB; + ULONG flags = 0; + + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.publicExponent); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.modulus); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.prime1); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.prime2); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.exponent1); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.exponent2); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.coefficient); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.privateExponent); + + NTSTATUS status = BCryptImportKeyPair( + s_rsa_alg, + NULL, + blob_type, + &key_pair_impl->key_handle, + key_pair_impl->key_buf.buffer, + (ULONG)key_pair_impl->key_buf.len, + flags); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptImportKeyPair")) { + goto on_error; + } + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + key_pair_impl->base.key_size_in_bits = private_key_data.modulus.len * 8; + + aws_der_decoder_destroy(decoder); + + return &key_pair_impl->base; + +on_error: + aws_der_decoder_destroy(decoder); + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + + aws_thread_call_once(&s_rsa_thread_once, s_load_alg_handle, NULL); + struct bcrypt_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_rsa_key_pair)); + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); + + struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, key); + + if (!decoder) { + goto on_error; + } + + struct aws_rsa_public_key_pkcs1 public_key_data; + AWS_ZERO_STRUCT(public_key_data); + if (aws_der_decoder_load_public_rsa_pkcs1(decoder, &public_key_data)) { + goto on_error; + } + + /* Hard to predict final blob size, so use pkcs1 key size as upper bound. 
*/ + size_t total_buffer_size = key.len + sizeof(BCRYPT_RSAKEY_BLOB); + + aws_byte_buf_init(&key_pair_impl->key_buf, allocator, total_buffer_size); + + BCRYPT_RSAKEY_BLOB key_blob; + AWS_ZERO_STRUCT(key_blob); + key_blob.Magic = BCRYPT_RSAPUBLIC_MAGIC; + key_blob.BitLength = (ULONG)public_key_data.modulus.len * 8; + key_blob.cbPublicExp = (ULONG)public_key_data.publicExponent.len; + key_blob.cbModulus = (ULONG)public_key_data.modulus.len; + + struct aws_byte_cursor header = aws_byte_cursor_from_array(&key_blob, sizeof(key_blob)); + aws_byte_buf_append(&key_pair_impl->key_buf, &header); + + LPCWSTR blob_type = BCRYPT_PUBLIC_KEY_BLOB; + ULONG flags = 0; + + aws_byte_buf_append(&key_pair_impl->key_buf, &public_key_data.publicExponent); + aws_byte_buf_append(&key_pair_impl->key_buf, &public_key_data.modulus); + + NTSTATUS status = BCryptImportKeyPair( + s_rsa_alg, + NULL, + blob_type, + &key_pair_impl->key_handle, + key_pair_impl->key_buf.buffer, + (ULONG)key_pair_impl->key_buf.len, + flags); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptImportKeyPair")) { + goto on_error; + } + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + key_pair_impl->base.key_size_in_bits = public_key_data.modulus.len * 8; + + aws_der_decoder_destroy(decoder); + + return &key_pair_impl->base; + +on_error: + aws_der_decoder_destroy(decoder); + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} |
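Both new RSA backends in this change (unix/openssl_rsa.c and windows/bcrypt_rsa.c) populate the same struct aws_rsa_key_vtable, so application code is expected to go through the generic aws_rsa_key_pair interface rather than the per-platform *_impl constructors. The sketch below shows roughly how that interface fits together; it assumes public wrappers in aws/cal/rsa.h (aws_rsa_key_pair_new_from_private_key_pkcs1, aws_rsa_key_pair_sign_message, aws_rsa_key_pair_verify_signature, aws_rsa_key_pair_release) that mirror the *_impl signatures shown in this diff; those wrapper names are an assumption of the sketch, not something introduced by this change.

#include <aws/cal/cal.h>
#include <aws/cal/rsa.h> /* assumed public header for the RSA key pair API */

/* Hypothetical caller: sign a SHA-256 digest and verify the signature with the same key pair. */
static int s_sign_and_verify_sketch(
    struct aws_allocator *allocator,
    struct aws_byte_cursor pkcs1_private_key,
    struct aws_byte_cursor sha256_digest) {

    aws_cal_library_init(allocator);

    /* Platform backend selection (BCrypt, CommonCrypto, libcrypto) happens behind this call. */
    struct aws_rsa_key_pair *key_pair =
        aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, pkcs1_private_key);
    if (key_pair == NULL) {
        aws_cal_library_clean_up();
        return AWS_OP_ERR;
    }

    /* Size the output buffer from the key, as the backends' short-buffer checks expect. */
    struct aws_byte_buf signature;
    aws_byte_buf_init(&signature, allocator, aws_rsa_key_pair_signature_length(key_pair));

    int result = AWS_OP_ERR;
    if (aws_rsa_key_pair_sign_message(
            key_pair, AWS_CAL_RSA_SIGNATURE_PSS_SHA256, sha256_digest, &signature) == AWS_OP_SUCCESS &&
        aws_rsa_key_pair_verify_signature(
            key_pair,
            AWS_CAL_RSA_SIGNATURE_PSS_SHA256,
            sha256_digest,
            aws_byte_cursor_from_buf(&signature)) == AWS_OP_SUCCESS) {
        result = AWS_OP_SUCCESS;
    }

    aws_byte_buf_clean_up_secure(&signature);
    aws_rsa_key_pair_release(key_pair);
    aws_cal_library_clean_up();
    return result;
}

Relatedly, the aws_cal_platform_thread_clean_up() hooks added in this change give long-lived applications a way to clear aws-lc's thread-local state (AWSLC_thread_local_clear) before a thread exits, rather than relying only on the destructor-based shutdown described in the libcrypto resolver comment above.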