diff options
author | thegeorg <[email protected]> | 2025-05-12 15:51:24 +0300 |
---|---|---|
committer | thegeorg <[email protected]> | 2025-05-12 16:06:27 +0300 |
commit | d629bb70c8773d2c0c43f5088ddbb5a86d8c37ea (patch) | |
tree | 4f678e0d65ad08c800db21c657d3b0f71fafed06 /contrib | |
parent | 92c4b696d7a1c03d54e13aff7a7c20a078d90dd7 (diff) |
Update contrib/restricted/aws libraries to nixpkgs 24.05
commit_hash:f8083acb039e6005e820cdee77b84e0a6b6c6d6d
Diffstat (limited to 'contrib')
267 files changed, 25291 insertions, 9574 deletions
diff --git a/contrib/restricted/aws/aws-c-auth/.yandex_meta/override.nix b/contrib/restricted/aws/aws-c-auth/.yandex_meta/override.nix index 6a964efc618..1977530432d 100644 --- a/contrib/restricted/aws/aws-c-auth/.yandex_meta/override.nix +++ b/contrib/restricted/aws/aws-c-auth/.yandex_meta/override.nix @@ -1,10 +1,10 @@ pkgs: attrs: with pkgs; with attrs; rec { - version = "0.6.27"; + version = "0.7.18"; src = fetchFromGitHub { owner = "awslabs"; repo = "aws-c-auth"; rev = "v${version}"; - hash = "sha256-rjluBj8C4GjE67Os0+1CKKI/2V9RnkbYKhpdqQBryik="; + hash = "sha256-JWYJz8ugYvXDvtJ5dRWVcA8F3PdjxO8aCc8l0jghYXg="; }; } diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/auth.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/auth.h index b00928372fc..7899cb89e89 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/auth.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/auth.h @@ -11,6 +11,8 @@ #include <aws/io/logging.h> #include <aws/sdkutils/sdkutils.h> +AWS_PUSH_SANE_WARNING_LEVEL + #define AWS_C_AUTH_PACKAGE_ID 6 /** @@ -47,6 +49,9 @@ enum aws_auth_errors { AWS_AUTH_SSO_TOKEN_INVALID, AWS_AUTH_SSO_TOKEN_EXPIRED, AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE, + AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE, + AWS_AUTH_PROFILE_STS_CREDENTIALS_PROVIDER_CYCLE_FAILURE, + AWS_AUTH_CREDENTIALS_PROVIDER_ECS_INVALID_TOKEN_FILE_PATH, AWS_AUTH_ERROR_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_AUTH_PACKAGE_ID) }; @@ -83,5 +88,6 @@ AWS_AUTH_API void aws_auth_library_clean_up(void); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_AUTH_H */ diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/aws_imds_client.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/aws_imds_client.h index b7eb1985742..f9f5580f2ed 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/aws_imds_client.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/aws_imds_client.h @@ -12,6 +12,8 @@ #include <aws/http/connection_manager.h> 
#include <aws/io/retry_strategy.h> +AWS_PUSH_SANE_WARNING_LEVEL + typedef void(aws_imds_client_shutdown_completed_fn)(void *user_data); /** @@ -49,6 +51,11 @@ struct aws_imds_client_options { enum aws_imds_protocol_version imds_version; /* + * If true, fallback from v2 to v1 will be disabled for all cases + */ + bool ec2_metadata_v1_disabled; + + /* * Table holding all cross-system functional dependencies for an imds client. * * For mocking the http layer in tests, leave NULL otherwise @@ -477,5 +484,6 @@ int aws_imds_client_get_instance_info( void *user_data); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_IMDS_CLIENT_H */ diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/credentials.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/credentials.h index 2a92f42caba..0c0ca12eafa 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/credentials.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/credentials.h @@ -12,6 +12,8 @@ #include <aws/common/linked_list.h> #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_client_bootstrap; struct aws_auth_http_system_vtable; struct aws_credentials; @@ -214,6 +216,11 @@ struct aws_credentials_provider_imds_options { */ enum aws_imds_protocol_version imds_version; + /* + * If true, fallback from v2 to v1 will be disabled for all cases + */ + bool ec2_metadata_v1_disabled; + /* For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; }; @@ -226,10 +233,20 @@ struct aws_credentials_provider_imds_options { * or via a full uri specified by environment variables: * AWS_CONTAINER_CREDENTIALS_RELATIVE_URI * AWS_CONTAINER_CREDENTIALS_FULL_URI - * AWS_CONTAINER_AUTHORIZATION_TOKEN + * * If both relative uri and absolute uri are set, relative uri - * has higher priority. Token is used in auth header but only for - * absolute uri. + * has higher priority. 
+ * + * Currently, the ECS creds provider doesn't read those environment variables and requires host & path_and_query + * TODO: Support AWS_CONTAINER_CREDENTIALS_RELATIVE_URI and AWS_CONTAINER_CREDENTIALS_FULL_URI + * parameters. + * + * For the Authorization token, there are three ways (in order of priority). + * 1. auth_token parameter + * 2. AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE (env var which contains absolute path to the token file. The file will be + * re-read for each call to get credentials.) + * 3. AWS_CONTAINER_AUTHORIZATION_TOKEN (env var which contains static auth token) + * * While above information is used in request only, endpoint info * is needed when creating ecs provider to initiate the connection * manager, more specifically, host and http scheme (tls or not) @@ -270,7 +287,7 @@ struct aws_credentials_provider_ecs_options { /* * Port to query credentials from. If zero, 80/443 will be used based on whether or not tls is enabled. */ - uint16_t port; + uint32_t port; }; /** @@ -321,7 +338,7 @@ struct aws_credentials_provider_x509_options { * identity provider like Elastic Kubernetes Service * https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html * The required parameters used in the request (region, roleArn, sessionName, tokenFilePath) are automatically resolved - * by SDK from envrionment variables or config file. + * by SDK from envrionment variables or config file if not set. 
--------------------------------------------------------------------------------- | Parameter | Environment Variable Name | Config File Property Name | ---------------------------------------------------------------------------------- @@ -330,6 +347,10 @@ struct aws_credentials_provider_x509_options { | role_session_name | AWS_ROLE_SESSION_NAME | role_session_name | | token_file_path | AWS_WEB_IDENTITY_TOKEN_FILE | web_identity_token_file | |--------------------------------------------------------------------------------| + * The order of resolution is the following + * 1. Parameters + * 2. Environment Variables + * 3. Config File */ struct aws_credentials_provider_sts_web_identity_options { struct aws_credentials_provider_shutdown_options shutdown_options; @@ -353,6 +374,33 @@ struct aws_credentials_provider_sts_web_identity_options { /* For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; + + /* + * (Optional) + * Override of what profile to use, if not set, 'default' will be used. + */ + struct aws_byte_cursor profile_name_override; + + /* + * (Optional) + * Override of region, if not set, it will be resolved from env or profile. + */ + struct aws_byte_cursor region; + /* + * (Optional) + * Override of role_arn, if not set, it will be resolved from env or profile. + */ + struct aws_byte_cursor role_arn; + /* + * (Optional) + * Override of role_session_name, if not set, it will be resolved from env or profile. + */ + struct aws_byte_cursor role_session_name; + /* + * (Optional) + * Override of token_file_path, if not set, it will be resolved from env or profile. + */ + struct aws_byte_cursor token_file_path; }; /* @@ -465,7 +513,6 @@ struct aws_credentials_provider_sts_options { "Expiration": "2019-05-29T00:21:43Z" } * Version here identifies the command output format version. - * This provider is not part of the default provider chain. 
*/ struct aws_credentials_provider_process_options { struct aws_credentials_provider_shutdown_options shutdown_options; @@ -474,6 +521,12 @@ struct aws_credentials_provider_process_options { * if not provided, we will try environment variable: AWS_PROFILE. */ struct aws_byte_cursor profile_to_use; + + /** + * (Optional) + * Use a cached config profile collection. You can also pass a merged collection. + */ + struct aws_profile_collection *config_profile_collection_cached; }; /** @@ -505,6 +558,18 @@ struct aws_credentials_provider_chain_default_options { * If this option is provided, `config_file_name_override` and `credentials_file_name_override` will be ignored. */ struct aws_profile_collection *profile_collection_cached; + + /* + * (Optional) + * Override of what profile to use, if not set, 'default' will be used. + */ + struct aws_byte_cursor profile_name_override; + + /* + * (Optional) + * If enabled, the Environment Credentials Provider is not added to the chain. + */ + bool skip_environment_credentials_provider; }; typedef int(aws_credentials_provider_delegate_get_credentials_fn)( @@ -1063,5 +1128,6 @@ struct aws_credentials_provider *aws_credentials_provider_new_chain_default( AWS_AUTH_API extern const struct aws_auth_http_system_vtable *g_aws_credentials_provider_http_function_table; AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_CREDENTIALS_H */ diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_signing.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_signing.h index 3ba0fd2c74a..87545033b8a 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_signing.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_signing.h @@ -109,6 +109,7 @@ AWS_AUTH_API extern const struct aws_string *g_aws_signing_credential_query_para AWS_AUTH_API extern const struct aws_string *g_aws_signing_date_name; AWS_AUTH_API extern const struct aws_string 
*g_aws_signing_signed_headers_query_param_name; AWS_AUTH_API extern const struct aws_string *g_aws_signing_security_token_name; +AWS_AUTH_API extern const struct aws_string *g_aws_signing_s3session_token_name; AWS_AUTH_API extern const struct aws_string *g_signature_type_sigv4a_http_request; /** diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/credentials_utils.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/credentials_utils.h index 598c3ba0626..1d87d78cf21 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/credentials_utils.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/credentials_utils.h @@ -68,6 +68,8 @@ struct aws_auth_http_system_vtable { aws_http_stream_release_fn *aws_http_stream_release; aws_http_connection_close_fn *aws_http_connection_close; + + int (*aws_high_res_clock_get_ticks)(uint64_t *timestamp); }; enum aws_parse_credentials_expiration_format { diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signable.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signable.h index 46f65378d74..86c32de69b0 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signable.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signable.h @@ -3,6 +3,8 @@ #include <aws/auth/auth.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_http_message; struct aws_http_headers; struct aws_input_stream; @@ -230,5 +232,6 @@ struct aws_signable *aws_signable_new_canonical_request( struct aws_byte_cursor canonical_request); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_SIGNABLE_H */ diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing.h index 185a0abb54c..fe481c68981 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing.h @@ -10,6 +10,8 @@ #include <aws/auth/signing_config.h> 
+AWS_PUSH_SANE_WARNING_LEVEL + struct aws_ecc_key_pair; struct aws_signable; struct aws_signing_result; @@ -131,5 +133,6 @@ AWS_AUTH_API struct aws_byte_cursor aws_trim_padded_sigv4a_signature(struct aws_byte_cursor signature); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_SIGNER_H */ diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_config.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_config.h index e7ce06321cd..15720466ed3 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_config.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_config.h @@ -11,6 +11,8 @@ #include <aws/common/byte_buf.h> #include <aws/common/date_time.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_credentials; typedef bool(aws_should_sign_header_fn)(const struct aws_byte_cursor *name, void *userdata); @@ -37,6 +39,7 @@ struct aws_signing_config_base { enum aws_signing_algorithm { AWS_SIGNING_ALGORITHM_V4, AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC, + AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, }; /** @@ -306,5 +309,6 @@ AWS_AUTH_API int aws_validate_aws_signing_config_aws(const struct aws_signing_config_aws *config); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_SIGNING_CONFIG_H */ diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_result.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_result.h index 7e3ba8cf98d..309665adc19 100644 --- a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_result.h +++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_result.h @@ -10,6 +10,8 @@ #include <aws/common/hash_table.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_array_list; struct aws_byte_cursor; struct aws_http_message; @@ -162,5 +164,6 @@ AWS_AUTH_API extern const struct aws_string *g_aws_signing_authorization_header_ AWS_AUTH_API extern const struct aws_string *g_aws_signing_authorization_query_param_name; AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL 
#endif /* AWS_AUTH_SIGNING_RESULT_H */ diff --git a/contrib/restricted/aws/aws-c-auth/source/auth.c b/contrib/restricted/aws/aws-c-auth/source/auth.c index 5a0fbc8ca6d..1e8be09ec26 100644 --- a/contrib/restricted/aws/aws-c-auth/source/auth.c +++ b/contrib/restricted/aws/aws-c-auth/source/auth.c @@ -103,7 +103,15 @@ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE, "Valid credentials could not be sourced by the sso credentials provider"), - + AWS_DEFINE_ERROR_INFO_AUTH( + AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE, + "Failed to source the IMDS resource"), + AWS_DEFINE_ERROR_INFO_AUTH( + AWS_AUTH_PROFILE_STS_CREDENTIALS_PROVIDER_CYCLE_FAILURE, + "Failed to resolve credentials because the profile contains a cycle in the assumeRole chain."), + AWS_DEFINE_ERROR_INFO_AUTH( + AWS_AUTH_CREDENTIALS_PROVIDER_ECS_INVALID_TOKEN_FILE_PATH, + "Failed to read the ECS token file specified in the AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable."), }; /* clang-format on */ diff --git a/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c b/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c index 5ef16bc408d..5ea2b80152d 100644 --- a/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c +++ b/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c @@ -30,6 +30,8 @@ #define IMDS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2 #define IMDS_DEFAULT_RETRIES 1 +AWS_STATIC_STRING_FROM_LITERAL(s_imds_host, "169.254.169.254"); + enum imds_token_state { AWS_IMDS_TS_INVALID, AWS_IMDS_TS_VALID, @@ -56,13 +58,16 @@ struct aws_imds_client { struct aws_retry_strategy *retry_strategy; const struct aws_auth_http_system_vtable *function_table; struct aws_imds_client_shutdown_options shutdown_options; + /* will be set to true by default, means using IMDS V2 */ bool token_required; struct aws_byte_buf cached_token; + uint64_t cached_token_expiration_timestamp; enum imds_token_state token_state; struct 
aws_linked_list pending_queries; struct aws_mutex token_lock; struct aws_condition_variable token_signal; + bool ec2_metadata_v1_disabled; struct aws_atomic_var ref_count; }; @@ -142,6 +147,7 @@ struct aws_imds_client *aws_imds_client_new( client->function_table = options->function_table ? options->function_table : g_aws_credentials_provider_http_function_table; client->token_required = options->imds_version == IMDS_PROTOCOL_V1 ? false : true; + client->ec2_metadata_v1_disabled = options->ec2_metadata_v1_disabled; client->shutdown_options = options->shutdown_options; struct aws_socket_options socket_options; @@ -157,18 +163,12 @@ struct aws_imds_client *aws_imds_client_new( manager_options.initial_window_size = IMDS_RESPONSE_SIZE_LIMIT; manager_options.socket_options = &socket_options; manager_options.tls_connection_options = NULL; - manager_options.host = aws_byte_cursor_from_c_str("169.254.169.254"); + manager_options.host = aws_byte_cursor_from_string(s_imds_host); manager_options.port = 80; manager_options.max_connections = 10; manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown; manager_options.shutdown_complete_user_data = client; - struct aws_http_connection_monitoring_options monitor_options; - AWS_ZERO_STRUCT(monitor_options); - monitor_options.allowable_throughput_failure_interval_seconds = 1; - monitor_options.minimum_throughput_bytes_per_second = 1; - manager_options.monitoring_options = &monitor_options; - client->connection_manager = client->function_table->aws_http_connection_manager_new(allocator, &manager_options); if (!client->connection_manager) { goto on_error; @@ -219,7 +219,10 @@ struct imds_user_data { * will be adapted according to response. */ bool imds_token_required; + /* Indicate the request is a fallback from a failure call. 
*/ + bool is_fallback_request; bool is_imds_token_request; + bool ec2_metadata_v1_disabled; int status_code; int error_code; @@ -281,6 +284,7 @@ static struct imds_user_data *s_user_data_new( } wrapped_user_data->imds_token_required = client->token_required; + wrapped_user_data->ec2_metadata_v1_disabled = client->ec2_metadata_v1_disabled; aws_atomic_store_int(&wrapped_user_data->ref_count, 1); return wrapped_user_data; @@ -317,8 +321,11 @@ static void s_reset_scratch_user_data(struct imds_user_data *user_data) { } static enum imds_token_copy_result s_copy_token_safely(struct imds_user_data *user_data); -static void s_invalidate_cached_token_safely(struct imds_user_data *user_data); -static bool s_update_token_safely(struct aws_imds_client *client, struct aws_byte_buf *token, bool token_required); +static void s_update_token_safely( + struct aws_imds_client *client, + struct aws_byte_buf *token, + bool token_required, + uint64_t expire_timestamp); static void s_query_complete(struct imds_user_data *user_data); static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data); static void s_on_retry_token_acquired(struct aws_retry_strategy *, int, struct aws_retry_token *, void *); @@ -335,7 +342,7 @@ static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aw AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client query response exceeded maximum allowed length", (void *)client); - return AWS_OP_ERR; + return aws_raise_error(AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE); } if (aws_byte_buf_append_dynamic(&imds_user_data->current_result, data)) { @@ -384,6 +391,7 @@ static int s_on_incoming_headers_fn( return AWS_OP_SUCCESS; } +AWS_STATIC_STRING_FROM_LITERAL(s_imds_host_header, "Host"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_accept_header, "Accept"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_accept_header_value, "*/*"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_user_agent_header, "User-Agent"); @@ -394,6 +402,8 @@ 
AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_resource_path, "/latest/api/token"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_ttl_header, "x-aws-ec2-metadata-token-ttl-seconds"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_header, "x-aws-ec2-metadata-token"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_ttl_default_value, "21600"); +/* s_imds_token_ttl_default_value - 5secs for refreshing the cached token */ +static const uint64_t s_imds_token_ttl_secs = 21595; static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data); @@ -417,6 +427,14 @@ static int s_make_imds_http_query( goto on_error; } + struct aws_http_header host_header = { + .name = aws_byte_cursor_from_string(s_imds_host_header), + .value = aws_byte_cursor_from_string(s_imds_host), + }; + if (aws_http_message_add_header(request, host_header)) { + goto on_error; + } + struct aws_http_header accept_header = { .name = aws_byte_cursor_from_string(s_imds_accept_header), .value = aws_byte_cursor_from_string(s_imds_accept_header_value), @@ -457,6 +475,7 @@ static int s_make_imds_http_query( .on_response_header_block_done = NULL, .on_response_body = s_on_incoming_body_fn, .on_complete = s_on_stream_complete_fn, + .response_first_byte_timeout_ms = 1000, .user_data = user_data, .request = request, }; @@ -486,25 +505,51 @@ on_error: static void s_client_on_token_response(struct imds_user_data *user_data) { /* Gets 400 means token is required but the request itself failed. */ if (user_data->status_code == AWS_HTTP_STATUS_CODE_400_BAD_REQUEST) { - s_update_token_safely(user_data->client, NULL, true); + s_update_token_safely(user_data->client, NULL, true, 0 /*expire_timestamp*/); return; } - /* - * Other than that, if meets any error, then token is not required, - * we should fall back to insecure request. Otherwise, we should use - * token in following requests. 
- */ - if (user_data->status_code != AWS_HTTP_STATUS_CODE_200_OK || user_data->current_result.len == 0) { - s_update_token_safely(user_data->client, NULL, false); - } else { + + if (user_data->status_code == AWS_HTTP_STATUS_CODE_200_OK && user_data->current_result.len != 0) { + AWS_LOGF_DEBUG(AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client has fetched the token", (void *)user_data->client); + struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&(user_data->current_result)); aws_byte_cursor_trim_pred(&cursor, aws_char_is_space); aws_byte_buf_reset(&user_data->imds_token, true /*zero contents*/); if (aws_byte_buf_append_and_update(&user_data->imds_token, &cursor)) { - s_update_token_safely(user_data->client, NULL, true); + s_update_token_safely(user_data->client, NULL /*token*/, true /*token_required*/, 0 /*expire_timestamp*/); return; } - s_update_token_safely(user_data->client, cursor.len == 0 ? NULL : &user_data->imds_token, cursor.len != 0); + /* The token was ALWAYS last for 6 hours, 21600 secs. Use current timestamp plus 21595 secs as the expiration + * timestamp for current token */ + uint64_t current = 0; + user_data->client->function_table->aws_high_res_clock_get_ticks(¤t); + uint64_t expire_timestamp = aws_add_u64_saturating( + current, aws_timestamp_convert(s_imds_token_ttl_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); + + AWS_ASSERT(cursor.len != 0); + s_update_token_safely(user_data->client, &user_data->imds_token, true /*token_required*/, expire_timestamp); + } else if (user_data->ec2_metadata_v1_disabled) { + AWS_LOGF_DEBUG( + AWS_LS_IMDS_CLIENT, + "(id=%p) IMDS client failed to fetch token for requester %p, and fall back to v1 is disabled." + "Received response status code: %d", + (void *)user_data->client, + (void *)user_data, + user_data->status_code); + s_update_token_safely(user_data->client, NULL /*token*/, true /*token_required*/, 0 /*expire_timestamp*/); + } else { + /* Request failed; falling back to insecure request. 
+ * TODO: The retryable error (503 throttle) will also fall back to v1. Instead, we should just resend the token + * request. + */ + AWS_LOGF_DEBUG( + AWS_LS_IMDS_CLIENT, + "(id=%p) IMDS client failed to fetch token for requester %p, fall back to v1 for the same " + "requester. Received response status code: %d", + (void *)user_data->client, + (void *)user_data, + user_data->status_code); + s_update_token_safely(user_data->client, NULL /*token*/, false /* token_required*/, 0 /*expire_timestamp*/); } } @@ -533,6 +578,7 @@ static void s_client_do_query_token(struct imds_user_data *user_data) { /* start query token for imds client */ struct aws_byte_cursor uri = aws_byte_cursor_from_string(s_imds_token_resource_path); + /* Hard-coded 6 hour TTL for the token. */ struct aws_http_header token_ttl_header = { .name = aws_byte_cursor_from_string(s_imds_token_ttl_header), .value = aws_byte_cursor_from_string(s_imds_token_ttl_default_value), @@ -596,17 +642,50 @@ static void s_query_complete(struct imds_user_data *user_data) { return; } - /* In this case we fallback to the secure imds flow. */ if (user_data->status_code == AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED) { - s_invalidate_cached_token_safely(user_data); - s_reset_scratch_user_data(user_data); - aws_retry_token_release(user_data->retry_token); - if (s_get_resource_async_with_imds_token(user_data)) { - s_user_data_release(user_data); + struct aws_imds_client *client = user_data->client; + aws_mutex_lock(&client->token_lock); + if (aws_byte_buf_eq(&user_data->imds_token, &client->cached_token)) { + /* If the token used matches the cached token, that means the cached token is invalid. */ + client->token_state = AWS_IMDS_TS_INVALID; + AWS_LOGF_DEBUG( + AWS_LS_IMDS_CLIENT, + "(id=%p) IMDS client's cached token is invalidated by requester %p.", + (void *)client, + (void *)user_data); + } + /* let following requests use token as it's required. 
*/ + client->token_required = true; + aws_mutex_unlock(&client->token_lock); + + if (!user_data->imds_token_required && !user_data->is_fallback_request) { + AWS_LOGF_DEBUG( + AWS_LS_IMDS_CLIENT, + "(id=%p) IMDS client failed to fetch resource via V1, try to use V2. requester %p.", + (void *)user_data->client, + (void *)user_data); + /* V1 request, fallback to V2 and try again. */ + s_reset_scratch_user_data(user_data); + user_data->is_fallback_request = true; + aws_retry_token_release(user_data->retry_token); + /* Try V2 now. */ + if (s_get_resource_async_with_imds_token(user_data)) { + s_user_data_release(user_data); + } + return; + } else { + /* Not retirable error. */ + AWS_LOGF_ERROR( + AWS_LS_IMDS_CLIENT, + "(id=%p) IMDS client failed to fetch resource. Server response 401 UNAUTHORIZED. requester %p.", + (void *)user_data->client, + (void *)user_data); + user_data->error_code = AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE; } - return; } + /* TODO: if server sent out error, we will still report as succeed with the error body received from server. */ + /* TODO: retry for 503 throttle. */ user_data->original_callback( user_data->error_code ? NULL : &user_data->current_result, user_data->error_code, @@ -672,6 +751,7 @@ static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_co client->function_table->aws_http_connection_manager_release_connection(client->connection_manager, connection); /* on encountering error, see if we could try again */ + /* TODO: check the status code as well? 
*/ if (error_code) { AWS_LOGF_WARN( AWS_LS_IMDS_CLIENT, @@ -742,8 +822,27 @@ static void s_complete_pending_queries( struct imds_user_data *requester = query->user_data; aws_mem_release(client->allocator, query); - requester->imds_token_required = token_required; bool should_continue = true; + if (requester->imds_token_required && !token_required) { + if (requester->is_fallback_request) { + AWS_LOGF_ERROR( + AWS_LS_IMDS_CLIENT, + "(id=%p) IMDS client failed to fetch resource without token, and also failed to fetch token. " + "requester %p.", + (void *)requester->client, + (void *)requester); + requester->error_code = AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE; + should_continue = false; + } else { + AWS_LOGF_DEBUG( + AWS_LS_IMDS_CLIENT, + "(id=%p) IMDS client failed to fetch token, fallback to v1. requester %p.", + (void *)requester->client, + (void *)requester); + requester->is_fallback_request = true; + } + } + requester->imds_token_required = token_required; if (token) { aws_byte_buf_reset(&requester->imds_token, true); struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(token); @@ -756,6 +855,7 @@ static void s_complete_pending_queries( should_continue = false; } } else if (token_required) { + requester->error_code = AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE; should_continue = false; } @@ -770,9 +870,8 @@ static void s_complete_pending_queries( } if (!should_continue) { - requester->error_code = aws_last_error(); if (requester->error_code == AWS_ERROR_SUCCESS) { - requester->error_code = AWS_ERROR_UNKNOWN; + requester->error_code = aws_last_error() == AWS_ERROR_SUCCESS ? 
AWS_ERROR_UNKNOWN : aws_last_error(); } s_query_complete(requester); } @@ -785,25 +884,35 @@ static enum imds_token_copy_result s_copy_token_safely(struct imds_user_data *us struct aws_linked_list pending_queries; aws_linked_list_init(&pending_queries); - aws_mutex_lock(&client->token_lock); + uint64_t current = 0; + user_data->client->function_table->aws_high_res_clock_get_ticks(¤t); + aws_mutex_lock(&client->token_lock); if (client->token_state == AWS_IMDS_TS_VALID) { - aws_byte_buf_reset(&user_data->imds_token, true); - struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&client->cached_token); - if (aws_byte_buf_append_dynamic(&user_data->imds_token, &cursor)) { - ret = AWS_IMDS_TCR_UNEXPECTED_ERROR; + if (current > client->cached_token_expiration_timestamp) { + /* The cached token expired. Switch the state */ + client->token_state = AWS_IMDS_TS_INVALID; + AWS_LOGF_DEBUG( + AWS_LS_IMDS_CLIENT, + "(id=%p) IMDS client's cached token expired. Fetching new token for requester %p.", + (void *)client, + (void *)user_data); } else { - ret = AWS_IMDS_TCR_SUCCESS; + aws_byte_buf_reset(&user_data->imds_token, true); + struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&client->cached_token); + if (aws_byte_buf_append_dynamic(&user_data->imds_token, &cursor)) { + ret = AWS_IMDS_TCR_UNEXPECTED_ERROR; + } else { + ret = AWS_IMDS_TCR_SUCCESS; + } } - } else { + } + + if (client->token_state != AWS_IMDS_TS_VALID) { ret = AWS_IMDS_TCR_WAITING_IN_QUEUE; struct imds_token_query *query = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_token_query)); - if (query != NULL) { - query->user_data = user_data; - aws_linked_list_push_back(&client->pending_queries, &query->node); - } else { - ret = AWS_IMDS_TCR_UNEXPECTED_ERROR; - } + query->user_data = user_data; + aws_linked_list_push_back(&client->pending_queries, &query->node); if (client->token_state == AWS_IMDS_TS_INVALID) { if (s_client_start_query_token(client)) { @@ -844,31 +953,16 @@ static enum 
imds_token_copy_result s_copy_token_safely(struct imds_user_data *us } return ret; } - -static void s_invalidate_cached_token_safely(struct imds_user_data *user_data) { - bool invalidated = false; - struct aws_imds_client *client = user_data->client; - aws_mutex_lock(&client->token_lock); - if (aws_byte_buf_eq(&user_data->imds_token, &client->cached_token)) { - client->token_state = AWS_IMDS_TS_INVALID; - invalidated = true; - } - aws_mutex_unlock(&client->token_lock); - if (invalidated) { - AWS_LOGF_DEBUG( - AWS_LS_IMDS_CLIENT, - "(id=%p) IMDS client's cached token is set to be invalid by requester %p.", - (void *)client, - (void *)user_data); - } -} - /** * Once a requseter returns from token request, it should call this function to unblock all other * waiting requesters. When the token parameter is NULL, means the token request failed. Now we need * a new requester to acquire the token again. */ -static bool s_update_token_safely(struct aws_imds_client *client, struct aws_byte_buf *token, bool token_required) { +static void s_update_token_safely( + struct aws_imds_client *client, + struct aws_byte_buf *token, + bool token_required, + uint64_t expire_timestamp) { AWS_FATAL_ASSERT(client); bool updated = false; @@ -882,6 +976,7 @@ static bool s_update_token_safely(struct aws_imds_client *client, struct aws_byt struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(token); if (aws_byte_buf_append_dynamic(&client->cached_token, &cursor) == AWS_OP_SUCCESS) { client->token_state = AWS_IMDS_TS_VALID; + client->cached_token_expiration_timestamp = expire_timestamp; updated = true; } } else { @@ -898,7 +993,6 @@ static bool s_update_token_safely(struct aws_imds_client *client, struct aws_byt } else { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to update the token from IMDS.", (void *)client); } - return updated; } int s_get_resource_async_with_imds_token(struct imds_user_data *user_data) { diff --git 
a/contrib/restricted/aws/aws-c-auth/source/aws_signing.c b/contrib/restricted/aws/aws-c-auth/source/aws_signing.c index aa38b8c6835..fe5610e2461 100644 --- a/contrib/restricted/aws/aws-c-auth/source/aws_signing.c +++ b/contrib/restricted/aws/aws-c-auth/source/aws_signing.c @@ -59,6 +59,7 @@ AWS_STRING_FROM_LITERAL(g_aws_signing_credential_query_param_name, "X-Amz-Creden AWS_STRING_FROM_LITERAL(g_aws_signing_date_name, "X-Amz-Date"); AWS_STRING_FROM_LITERAL(g_aws_signing_signed_headers_query_param_name, "X-Amz-SignedHeaders"); AWS_STRING_FROM_LITERAL(g_aws_signing_security_token_name, "X-Amz-Security-Token"); +AWS_STRING_FROM_LITERAL(g_aws_signing_s3session_token_name, "X-Amz-S3session-Token"); AWS_STRING_FROM_LITERAL(g_aws_signing_expires_query_param_name, "X-Amz-Expires"); AWS_STRING_FROM_LITERAL(g_aws_signing_region_set_name, "X-Amz-Region-Set"); @@ -87,6 +88,7 @@ static struct aws_byte_cursor s_amz_date_header_name; static struct aws_byte_cursor s_authorization_header_name; static struct aws_byte_cursor s_region_set_header_name; static struct aws_byte_cursor s_amz_security_token_header_name; +static struct aws_byte_cursor s_amz_s3session_token_header_name; static struct aws_byte_cursor s_amz_signature_param_name; static struct aws_byte_cursor s_amz_date_param_name; @@ -191,6 +193,11 @@ int aws_signing_init_signing_tables(struct aws_allocator *allocator) { return AWS_OP_ERR; } + s_amz_s3session_token_header_name = aws_byte_cursor_from_string(g_aws_signing_s3session_token_name); + if (aws_hash_table_put(&s_forbidden_headers, &s_amz_s3session_token_header_name, NULL, NULL)) { + return AWS_OP_ERR; + } + if (aws_hash_table_init( &s_forbidden_params, allocator, @@ -279,25 +286,25 @@ static int s_get_signature_type_cursor(struct aws_signing_state_aws *state, stru case AWS_ST_HTTP_REQUEST_QUERY_PARAMS: case AWS_ST_CANONICAL_REQUEST_HEADERS: case AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS: - if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4) { - *cursor = 
aws_byte_cursor_from_string(s_signature_type_sigv4_http_request); - } else { + if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { *cursor = aws_byte_cursor_from_string(g_signature_type_sigv4a_http_request); + } else { + *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_http_request); } break; case AWS_ST_HTTP_REQUEST_CHUNK: case AWS_ST_HTTP_REQUEST_EVENT: - if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4) { - *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_s3_chunked_payload); - } else { + if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4a_s3_chunked_payload); + } else { + *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_s3_chunked_payload); } break; case AWS_ST_HTTP_REQUEST_TRAILING_HEADERS: - if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4) { - *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_s3_chunked_trailer_payload); - } else { + if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4a_s3_chunked_trailer_payload); + } else { + *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_s3_chunked_trailer_payload); } break; @@ -841,12 +848,20 @@ static int s_add_authorization_query_params( } } - /* X-Amz-Security-token */ - struct aws_byte_cursor security_token_name_cur = aws_byte_cursor_from_string(g_aws_signing_security_token_name); + /* X-Amz-*-token */ + /* We have different token between S3Express and other signing, which needs different token header name */ + struct aws_byte_cursor token_header_name; + if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS) { + /* X-Amz-S3session-Token */ + token_header_name = s_amz_s3session_token_header_name; + } else { + /* X-Amz-Security-Token */ + token_header_name = s_amz_security_token_header_name; + } struct aws_byte_cursor session_token_cursor = 
aws_credentials_get_session_token(state->config.credentials); if (session_token_cursor.len > 0) { struct aws_uri_param security_token_param = { - .key = security_token_name_cur, + .key = token_header_name, .value = session_token_cursor, }; @@ -1262,8 +1277,17 @@ static int s_build_canonical_stable_header_list( if (state->config.signature_type == AWS_ST_HTTP_REQUEST_HEADERS) { /* - * X-Amz-Security-Token + * X-Amz-*-Token */ + /* We have different token between S3Express and other signing, which needs different token header name */ + struct aws_byte_cursor token_header_name; + if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS) { + /* X-Amz-S3session-Token */ + token_header_name = s_amz_s3session_token_header_name; + } else { + /* X-Amz-Security-Token */ + token_header_name = s_amz_security_token_header_name; + } struct aws_byte_cursor session_token_cursor = aws_credentials_get_session_token(state->config.credentials); if (session_token_cursor.len > 0) { /* Note that if omit_session_token is true, it is added to final @@ -1272,17 +1296,13 @@ static int s_build_canonical_stable_header_list( if (aws_signing_result_append_property_list( &state->result, g_aws_http_headers_property_list_name, - &s_amz_security_token_header_name, + &token_header_name, &session_token_cursor)) { return AWS_OP_ERR; } } else { if (s_add_authorization_header( - state, - stable_header_list, - out_required_capacity, - s_amz_security_token_header_name, - session_token_cursor)) { + state, stable_header_list, out_required_capacity, token_header_name, session_token_cursor)) { return AWS_OP_ERR; } } @@ -1597,6 +1617,7 @@ static int s_append_credential_scope_terminator(enum aws_signing_algorithm algor switch (algorithm) { case AWS_SIGNING_ALGORITHM_V4: + case AWS_SIGNING_ALGORITHM_V4_S3EXPRESS: case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC: terminator_cursor = aws_byte_cursor_from_string(s_credential_scope_sigv4_terminator); break; @@ -2067,7 +2088,7 @@ int 
aws_signing_build_canonical_request(struct aws_signing_state_aws *state) { return s_apply_existing_canonical_request(state); default: - return AWS_OP_ERR; + return aws_raise_error(AWS_AUTH_SIGNING_UNSUPPORTED_SIGNATURE_TYPE); } } @@ -2304,6 +2325,7 @@ cleanup: int s_calculate_signature_value(struct aws_signing_state_aws *state) { switch (state->config.algorithm) { case AWS_SIGNING_ALGORITHM_V4: + case AWS_SIGNING_ALGORITHM_V4_S3EXPRESS: return s_calculate_sigv4_signature_value(state); case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC: diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c index a0ac07714f4..62a9a6593fb 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c @@ -48,7 +48,9 @@ struct aws_credentials_provider *aws_credentials_provider_new_anonymous( aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_anonymous_vtable, credentials); - provider->shutdown_options = *shutdown_options; + if (shutdown_options) { + provider->shutdown_options = *shutdown_options; + } return provider; diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c index a68241f9e02..ccd2bfbbf5b 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c @@ -27,7 +27,6 @@ AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_relative_uri, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_full_uri, "AWS_CONTAINER_CREDENTIALS_FULL_URI"); -AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_token, "AWS_CONTAINER_AUTHORIZATION_TOKEN"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_host, "169.254.170.2"); 
AWS_STATIC_STRING_FROM_LITERAL(s_ec2_creds_env_disable, "AWS_EC2_METADATA_DISABLED"); @@ -41,41 +40,35 @@ static struct aws_credentials_provider *s_aws_credentials_provider_new_ecs_or_im struct aws_client_bootstrap *bootstrap, struct aws_tls_ctx *tls_ctx) { - struct aws_byte_cursor auth_token_cursor; - AWS_ZERO_STRUCT(auth_token_cursor); - struct aws_credentials_provider *ecs_or_imds_provider = NULL; struct aws_string *ecs_relative_uri = NULL; struct aws_string *ecs_full_uri = NULL; struct aws_string *ec2_imds_disable = NULL; - struct aws_string *ecs_token = NULL; if (aws_get_environment_value(allocator, s_ecs_creds_env_relative_uri, &ecs_relative_uri) != AWS_OP_SUCCESS || aws_get_environment_value(allocator, s_ecs_creds_env_full_uri, &ecs_full_uri) != AWS_OP_SUCCESS || - aws_get_environment_value(allocator, s_ec2_creds_env_disable, &ec2_imds_disable) != AWS_OP_SUCCESS || - aws_get_environment_value(allocator, s_ecs_creds_env_token, &ecs_token) != AWS_OP_SUCCESS) { + aws_get_environment_value(allocator, s_ec2_creds_env_disable, &ec2_imds_disable) != AWS_OP_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed reading environment variables during default credentials provider chain initialization."); goto clean_up; } - if (ecs_token && ecs_token->len) { - auth_token_cursor = aws_byte_cursor_from_string(ecs_token); - } - /* * ToDo: the uri choice logic should be done in the ecs provider init logic. As it stands, it's a nightmare * to try and use the ecs provider anywhere outside the default chain. 
*/ if (ecs_relative_uri && ecs_relative_uri->len) { + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "default chain: ECS credentials provider with relative URI %s will be used to retrieve credentials", + aws_string_c_str(ecs_relative_uri)); struct aws_credentials_provider_ecs_options ecs_options = { .shutdown_options = *shutdown_options, .bootstrap = bootstrap, .host = aws_byte_cursor_from_string(s_ecs_host), .path_and_query = aws_byte_cursor_from_string(ecs_relative_uri), .tls_ctx = NULL, - .auth_token = auth_token_cursor, }; ecs_or_imds_provider = aws_credentials_provider_new_ecs(allocator, &ecs_options); @@ -83,22 +76,39 @@ static struct aws_credentials_provider *s_aws_credentials_provider_new_ecs_or_im struct aws_uri uri; struct aws_byte_cursor uri_cstr = aws_byte_cursor_from_string(ecs_full_uri); if (AWS_OP_ERR == aws_uri_init_parse(&uri, allocator, &uri_cstr)) { + AWS_LOGF_ERROR( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "default chain: failed to parse URI %s during default credentials provider chain initialization: %s", + aws_string_c_str(ecs_full_uri), + aws_error_str(aws_last_error())); goto clean_up; } + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "default chain: ECS credentials provider with full URI %s will be used to retrieve credentials", + aws_string_c_str(ecs_full_uri)); + + struct aws_byte_cursor path_and_query = uri.path_and_query; + if (path_and_query.len == 0) { + path_and_query = aws_byte_cursor_from_c_str("/"); + } + struct aws_credentials_provider_ecs_options ecs_options = { .shutdown_options = *shutdown_options, .bootstrap = bootstrap, .host = uri.host_name, - .path_and_query = uri.path_and_query, + .path_and_query = path_and_query, .tls_ctx = aws_byte_cursor_eq_c_str_ignore_case(&(uri.scheme), "HTTPS") ? 
tls_ctx : NULL, - .auth_token = auth_token_cursor, .port = uri.port, }; ecs_or_imds_provider = aws_credentials_provider_new_ecs(allocator, &ecs_options); aws_uri_clean_up(&uri); } else if (ec2_imds_disable == NULL || aws_string_eq_c_str_ignore_case(ec2_imds_disable, "false")) { + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "default chain: IMDS credentials provider will be used to retrieve credentials"); struct aws_credentials_provider_imds_options imds_options = { .shutdown_options = *shutdown_options, .bootstrap = bootstrap, @@ -107,11 +117,15 @@ static struct aws_credentials_provider *s_aws_credentials_provider_new_ecs_or_im } clean_up: + if (ecs_or_imds_provider == NULL) { + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "default chain: neither ECS nor IMDS will be used to retrieve credentials"); + } aws_string_destroy(ecs_relative_uri); aws_string_destroy(ecs_full_uri); aws_string_destroy(ec2_imds_disable); - aws_string_destroy(ecs_token); return ecs_or_imds_provider; } @@ -273,6 +287,7 @@ struct aws_credentials_provider *aws_credentials_provider_new_chain_default( struct aws_tls_ctx *tls_ctx = NULL; struct aws_credentials_provider *environment_provider = NULL; struct aws_credentials_provider *profile_provider = NULL; + struct aws_credentials_provider *process_provider = NULL; struct aws_credentials_provider *sts_provider = NULL; struct aws_credentials_provider *ecs_or_imds_provider = NULL; struct aws_credentials_provider *chain_provider = NULL; @@ -305,19 +320,23 @@ struct aws_credentials_provider *aws_credentials_provider_new_chain_default( #endif /* BYO_CRYPTO */ } - enum { providers_size = 4 }; + enum { providers_size = 5 }; struct aws_credentials_provider *providers[providers_size]; AWS_ZERO_ARRAY(providers); size_t index = 0; - struct aws_credentials_provider_environment_options environment_options; - AWS_ZERO_STRUCT(environment_options); - environment_provider = aws_credentials_provider_new_environment(allocator, &environment_options); - 
if (environment_provider == NULL) { - goto on_error; + /* Providers that touch fast local resources... */ + if (!options->skip_environment_credentials_provider) { + struct aws_credentials_provider_environment_options environment_options; + AWS_ZERO_STRUCT(environment_options); + environment_provider = aws_credentials_provider_new_environment(allocator, &environment_options); + if (environment_provider == NULL) { + goto on_error; + } + providers[index++] = environment_provider; } - providers[index++] = environment_provider; + /* Providers that will make a network call only if the relevant configuration is present... */ struct aws_credentials_provider_profile_options profile_options; AWS_ZERO_STRUCT(profile_options); @@ -325,6 +344,7 @@ struct aws_credentials_provider *aws_credentials_provider_new_chain_default( profile_options.tls_ctx = tls_ctx; profile_options.shutdown_options = sub_provider_shutdown_options; profile_options.profile_collection_cached = options->profile_collection_cached; + profile_options.profile_name_override = options->profile_name_override; profile_provider = aws_credentials_provider_new_profile(allocator, &profile_options); if (profile_provider != NULL) { providers[index++] = profile_provider; @@ -338,6 +358,7 @@ struct aws_credentials_provider *aws_credentials_provider_new_chain_default( sts_options.tls_ctx = tls_ctx; sts_options.shutdown_options = sub_provider_shutdown_options; sts_options.config_profile_collection_cached = options->profile_collection_cached; + sts_options.profile_name_override = options->profile_name_override; sts_provider = aws_credentials_provider_new_sts_web_identity(allocator, &sts_options); if (sts_provider != NULL) { providers[index++] = sts_provider; @@ -345,6 +366,20 @@ struct aws_credentials_provider *aws_credentials_provider_new_chain_default( aws_atomic_fetch_add(&impl->shutdowns_remaining, 1); } + struct aws_credentials_provider_process_options process_options; + AWS_ZERO_STRUCT(process_options); + 
process_options.shutdown_options = sub_provider_shutdown_options; + process_options.config_profile_collection_cached = options->profile_collection_cached; + process_options.profile_to_use = options->profile_name_override; + process_provider = aws_credentials_provider_new_process(allocator, &process_options); + if (process_provider != NULL) { + providers[index++] = process_provider; + /* 1 shutdown call from the process provider's shutdown */ + aws_atomic_fetch_add(&impl->shutdowns_remaining, 1); + } + + /* Providers that will always make a network call unless explicitly disabled... */ + ecs_or_imds_provider = s_aws_credentials_provider_new_ecs_or_imds( allocator, &sub_provider_shutdown_options, options->bootstrap, tls_ctx); if (ecs_or_imds_provider != NULL) { @@ -370,6 +405,7 @@ struct aws_credentials_provider *aws_credentials_provider_new_chain_default( */ aws_credentials_provider_release(environment_provider); aws_credentials_provider_release(profile_provider); + aws_credentials_provider_release(process_provider); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(ecs_or_imds_provider); @@ -411,6 +447,7 @@ on_error: } else { aws_credentials_provider_release(ecs_or_imds_provider); aws_credentials_provider_release(profile_provider); + aws_credentials_provider_release(process_provider); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(environment_provider); } diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c index 7d96cf3a243..4f7497cbd41 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c @@ -3,11 +3,13 @@ * SPDX-License-Identifier: Apache-2.0. 
*/ +#include "aws/common/byte_buf.h" #include <aws/auth/credentials.h> #include <aws/auth/private/credentials_utils.h> #include <aws/common/clock.h> #include <aws/common/date_time.h> +#include <aws/common/environment.h> #include <aws/common/string.h> #include <aws/http/connection.h> #include <aws/http/connection_manager.h> @@ -28,6 +30,9 @@ #define ECS_RESPONSE_SIZE_LIMIT 10000 #define ECS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2 +AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_token_file, "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"); +AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_token, "AWS_CONTAINER_AUTHORIZATION_TOKEN"); + static void s_on_connection_manager_shutdown(void *user_data); struct aws_credentials_provider_ecs_impl { @@ -35,6 +40,7 @@ struct aws_credentials_provider_ecs_impl { const struct aws_auth_http_system_vtable *function_table; struct aws_string *host; struct aws_string *path_and_query; + struct aws_string *auth_token_file_path; struct aws_string *auth_token; }; @@ -47,6 +53,7 @@ struct aws_credentials_provider_ecs_user_data { struct aws_credentials_provider *ecs_provider; aws_on_get_credentials_callback_fn *original_callback; void *original_user_data; + struct aws_byte_buf auth_token; /* mutable */ struct aws_http_connection *connection; @@ -68,6 +75,7 @@ static void s_aws_credentials_provider_ecs_user_data_destroy(struct aws_credenti impl->connection_manager, user_data->connection); } + aws_byte_buf_clean_up(&user_data->auth_token); aws_byte_buf_clean_up(&user_data->current_result); if (user_data->request) { @@ -84,9 +92,6 @@ static struct aws_credentials_provider_ecs_user_data *s_aws_credentials_provider struct aws_credentials_provider_ecs_user_data *wrapped_user_data = aws_mem_calloc(ecs_provider->allocator, 1, sizeof(struct aws_credentials_provider_ecs_user_data)); - if (wrapped_user_data == NULL) { - goto on_error; - } wrapped_user_data->allocator = ecs_provider->allocator; wrapped_user_data->ecs_provider = ecs_provider; @@ -98,12 +103,33 @@ 
static struct aws_credentials_provider_ecs_user_data *s_aws_credentials_provider goto on_error; } - return wrapped_user_data; + struct aws_credentials_provider_ecs_impl *impl = ecs_provider->impl; + if (impl->auth_token_file_path != NULL && impl->auth_token_file_path->len > 0) { + if (aws_byte_buf_init_from_file( + &wrapped_user_data->auth_token, + ecs_provider->allocator, + aws_string_c_str(impl->auth_token_file_path))) { + AWS_LOGF_ERROR( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p) ECS credentials provider failed to read token from the path: %s with error: %d", + (void *)ecs_provider, + aws_string_c_str(impl->auth_token_file_path), + aws_last_error()); + aws_raise_error(AWS_AUTH_CREDENTIALS_PROVIDER_ECS_INVALID_TOKEN_FILE_PATH); + goto on_error; + } + } else if (impl->auth_token != NULL && impl->auth_token->len > 0) { + if (aws_byte_buf_init_copy_from_cursor( + &wrapped_user_data->auth_token, + ecs_provider->allocator, + aws_byte_cursor_from_string(impl->auth_token))) { + goto on_error; + } + } + return wrapped_user_data; on_error: - s_aws_credentials_provider_ecs_user_data_destroy(wrapped_user_data); - return NULL; } @@ -318,10 +344,10 @@ static int s_make_ecs_http_query( goto on_error; } - if (impl->auth_token != NULL) { + if (ecs_user_data->auth_token.len) { struct aws_http_header auth_header = { .name = aws_byte_cursor_from_string(s_ecs_authorization_header), - .value = aws_byte_cursor_from_string(impl->auth_token), + .value = aws_byte_cursor_from_buf(&ecs_user_data->auth_token), }; if (aws_http_message_add_header(request, auth_header)) { goto on_error; @@ -431,6 +457,9 @@ static int s_credentials_provider_ecs_get_credentials_async( aws_on_get_credentials_callback_fn callback, void *user_data) { + AWS_LOGF_DEBUG( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: ECS provider trying to load credentials", (void *)provider); + struct aws_credentials_provider_ecs_impl *impl = provider->impl; struct aws_credentials_provider_ecs_user_data *wrapped_user_data = @@ 
-459,6 +488,7 @@ static void s_credentials_provider_ecs_destroy(struct aws_credentials_provider * aws_string_destroy(impl->path_and_query); aws_string_destroy(impl->auth_token); + aws_string_destroy(impl->auth_token_file_path); aws_string_destroy(impl->host); /* aws_http_connection_manager_release will eventually leads to call of s_on_connection_manager_shutdown, @@ -564,7 +594,18 @@ struct aws_credentials_provider *aws_credentials_provider_new_ecs( if (impl->auth_token == NULL) { goto on_error; } + } else { + /* read the environment variables */ + struct aws_string *ecs_env_token_file_path = NULL; + struct aws_string *ecs_env_token = NULL; + if (aws_get_environment_value(allocator, s_ecs_creds_env_token_file, &ecs_env_token_file_path) || + aws_get_environment_value(allocator, s_ecs_creds_env_token, &ecs_env_token)) { + goto on_error; + } + impl->auth_token_file_path = ecs_env_token_file_path; + impl->auth_token = ecs_env_token; } + impl->path_and_query = aws_string_new_from_cursor(allocator, &options->path_and_query); if (impl->path_and_query == NULL) { goto on_error; diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c index 0aeac484118..4b070d9fc57 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c @@ -30,7 +30,7 @@ static int s_credentials_provider_environment_get_credentials_async( aws_get_environment_value(allocator, s_secret_access_key_env_var, &secret_access_key); aws_get_environment_value(allocator, s_session_token_env_var, &session_token); - if (access_key_id != NULL && secret_access_key != NULL) { + if (access_key_id != NULL && access_key_id->len > 0 && secret_access_key != NULL && secret_access_key->len > 0) { credentials = aws_credentials_new_from_string(allocator, access_key_id, secret_access_key, session_token, 
UINT64_MAX); if (credentials == NULL) { @@ -40,6 +40,17 @@ static int s_credentials_provider_environment_get_credentials_async( error_code = AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_ENVIRONMENT; } + if (error_code == AWS_ERROR_SUCCESS) { + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: Loaded credentials from environment variables", (void *)provider); + } else { + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "id=%p: Failed to load credentials from environment variables: %s", + (void *)provider, + aws_error_str(error_code)); + } + callback(credentials, error_code, user_data); aws_credentials_release(credentials); diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c index e7801ab26e9..952297049d6 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c @@ -84,6 +84,7 @@ struct aws_credentials_provider *aws_credentials_provider_new_imds( .bootstrap = options->bootstrap, .function_table = options->function_table, .imds_version = options->imds_version, + .ec2_metadata_v1_disabled = options->ec2_metadata_v1_disabled, .shutdown_options = { .shutdown_callback = s_on_imds_client_shutdown, @@ -154,6 +155,18 @@ on_error: static void s_on_get_credentials(const struct aws_credentials *credentials, int error_code, void *user_data) { (void)error_code; struct imds_provider_user_data *wrapped_user_data = user_data; + if (error_code == AWS_OP_SUCCESS) { + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "id=%p: IMDS provider successfully retrieved credentials", + (void *)wrapped_user_data->imds_provider); + } else { + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "id=%p: IMDS provider failed to retrieve credentials: %s", + (void *)wrapped_user_data->imds_provider, + aws_error_str(error_code)); + } wrapped_user_data->original_callback( (struct aws_credentials 
*)credentials, error_code, wrapped_user_data->original_user_data); s_imds_provider_user_data_destroy(wrapped_user_data); @@ -179,6 +192,11 @@ static void s_on_get_role(const struct aws_byte_buf *role, int error_code, void return; on_error: + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "id=%p: IMDS provider failed to retrieve role: %s", + (void *)wrapped_user_data->imds_provider, + aws_error_str(error_code)); wrapped_user_data->original_callback( NULL, AWS_AUTH_CREDENTIALS_PROVIDER_IMDS_SOURCE_FAILURE, wrapped_user_data->original_user_data); s_imds_provider_user_data_destroy(wrapped_user_data); @@ -189,6 +207,9 @@ static int s_credentials_provider_imds_get_credentials_async( aws_on_get_credentials_callback_fn callback, void *user_data) { + AWS_LOGF_DEBUG( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: IMDS provider trying to load credentials", (void *)provider); + struct aws_credentials_provider_imds_impl *impl = provider->impl; struct imds_provider_user_data *wrapped_user_data = s_imds_provider_user_data_new(provider, callback, user_data); @@ -203,6 +224,11 @@ static int s_credentials_provider_imds_get_credentials_async( return AWS_OP_SUCCESS; error: + AWS_LOGF_ERROR( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "id=%p: IMDS provider failed to request credentials: %s", + (void *)provider, + aws_error_str(aws_last_error())); s_imds_provider_user_data_destroy(wrapped_user_data); return AWS_OP_ERR; } diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c index e1c0bfbf00a..d043ff394e8 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c @@ -32,7 +32,6 @@ static int s_get_credentials_from_process( }; struct aws_run_command_result result; - int ret = AWS_OP_ERR; if (aws_run_command_result_init(provider->allocator, &result)) { goto on_finish; } @@ -50,7 +49,7 @@ 
static int s_get_credentials_from_process( struct aws_parse_credentials_from_json_doc_options parse_options = { .access_key_id_name = "AccessKeyId", .secret_access_key_name = "SecretAccessKey", - .token_name = "Token", + .token_name = "SessionToken", .expiration_name = "Expiration", .token_required = false, .expiration_required = false, @@ -71,7 +70,6 @@ static int s_get_credentials_from_process( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Process credentials provider successfully sourced credentials.", (void *)provider); - ret = AWS_OP_SUCCESS; on_finish: @@ -87,7 +85,7 @@ on_finish: callback(credentials, error_code, user_data); aws_run_command_result_cleanup(&result); aws_credentials_release(credentials); - return ret; + return AWS_OP_SUCCESS; } static void s_credentials_provider_process_destroy(struct aws_credentials_provider *provider) { @@ -100,7 +98,6 @@ static void s_credentials_provider_process_destroy(struct aws_credentials_provid } AWS_STATIC_STRING_FROM_LITERAL(s_credentials_process, "credential_process"); -static struct aws_byte_cursor s_default_profile_name_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("default"); static struct aws_profile_collection *s_load_profile(struct aws_allocator *allocator) { @@ -156,7 +153,9 @@ static void s_check_or_get_with_profile_config( } static struct aws_byte_cursor s_stderr_redirect_to_stdout = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" 2>&1"); -static struct aws_string *s_get_command(struct aws_allocator *allocator, struct aws_byte_cursor profile_cursor) { +static struct aws_string *s_get_command( + struct aws_allocator *allocator, + const struct aws_credentials_provider_process_options *options) { struct aws_byte_buf command_buf; AWS_ZERO_STRUCT(command_buf); @@ -164,13 +163,13 @@ static struct aws_string *s_get_command(struct aws_allocator *allocator, struct struct aws_profile_collection *config_profiles = NULL; struct aws_string *profile_name = NULL; const struct aws_profile *profile = NULL; - - config_profiles = 
s_load_profile(allocator); - if (profile_cursor.len == 0) { - profile_name = aws_get_profile_name(allocator, &s_default_profile_name_cursor); + if (options->config_profile_collection_cached) { + config_profiles = aws_profile_collection_acquire(options->config_profile_collection_cached); } else { - profile_name = aws_string_new_from_array(allocator, profile_cursor.ptr, profile_cursor.len); + config_profiles = s_load_profile(allocator); } + profile_name = aws_get_profile_name(allocator, &options->profile_to_use); + if (config_profiles && profile_name) { profile = aws_profile_collection_get_profile(config_profiles, profile_name); } @@ -206,7 +205,7 @@ static struct aws_string *s_get_command(struct aws_allocator *allocator, struct on_finish: aws_string_destroy(profile_name); - aws_profile_collection_destroy(config_profiles); + aws_profile_collection_release(config_profiles); aws_byte_buf_clean_up_secure(&command_buf); return command; } @@ -238,7 +237,7 @@ struct aws_credentials_provider *aws_credentials_provider_new_process( AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); - impl->command = s_get_command(allocator, options->profile_to_use); + impl->command = s_get_command(allocator, options); if (!impl->command) { goto on_error; } diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c index 7b90b1b1b47..038322c93bd 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c @@ -7,6 +7,7 @@ #include <aws/auth/private/aws_profile.h> #include <aws/auth/private/credentials_utils.h> +#include <aws/common/hash_table.h> #include <aws/common/process.h> #include <aws/common/string.h> #include <aws/io/tls_channel_handler.h> @@ -24,6 +25,8 @@ AWS_STRING_FROM_LITERAL(s_role_arn_name, "role_arn"); AWS_STRING_FROM_LITERAL(s_role_session_name_name, "role_session_name"); 
AWS_STRING_FROM_LITERAL(s_credential_source_name, "credential_source"); AWS_STRING_FROM_LITERAL(s_source_profile_name, "source_profile"); +AWS_STRING_FROM_LITERAL(s_access_key_id_profile_var, "aws_access_key_id"); +AWS_STRING_FROM_LITERAL(s_secret_access_key_profile_var, "aws_secret_access_key"); static struct aws_byte_cursor s_default_session_name_pfx = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-common-runtime-profile-config"); @@ -134,6 +137,15 @@ static int s_profile_file_credentials_provider_get_credentials_async( } } + if (error_code == AWS_ERROR_SUCCESS) { + AWS_LOGF_INFO(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Loaded credentials from profile provider"); + } else { + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "Failed to load credentials from profile provider: %s", + aws_error_str(error_code)); + } + callback(credentials, error_code, user_data); /* @@ -172,10 +184,14 @@ static struct aws_credentials_provider *s_create_profile_based_provider( struct aws_string *config_file_path, const struct aws_string *profile_name, struct aws_profile_collection *profile_collection_cached) { - struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_profile_file_impl *impl = NULL; + AWS_LOGF_INFO( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "static: profile %s attempting to create profile-based credentials provider", + aws_string_c_str(profile_name)); + aws_mem_acquire_many( allocator, 2, @@ -203,14 +219,19 @@ static struct aws_credentials_provider *s_create_profile_based_provider( return provider; } +static struct aws_credentials_provider *s_credentials_provider_new_profile_internal( + struct aws_allocator *allocator, + const struct aws_credentials_provider_profile_options *options, + struct aws_hash_table *source_profiles_table); + /* use the selected property that specifies a role_arn to load an STS based provider. 
*/ static struct aws_credentials_provider *s_create_sts_based_provider( struct aws_allocator *allocator, const struct aws_profile_property *role_arn_property, const struct aws_profile *profile, - struct aws_string *credentials_file_path, - struct aws_string *config_file_path, - const struct aws_credentials_provider_profile_options *options) { + const struct aws_credentials_provider_profile_options *options, + struct aws_profile_collection *merged_profiles, + struct aws_hash_table *source_profiles_table) { struct aws_credentials_provider *provider = NULL; AWS_LOGF_INFO( @@ -291,12 +312,13 @@ static struct aws_credentials_provider *s_create_sts_based_provider( "static: source_profile set to %s", aws_string_c_str(aws_profile_property_get_value(source_profile_property))); - sts_options.creds_provider = s_create_profile_based_provider( - allocator, - credentials_file_path, - config_file_path, - aws_profile_property_get_value(source_profile_property), - options->profile_collection_cached); + struct aws_credentials_provider_profile_options profile_provider_options = *options; + profile_provider_options.profile_name_override = + aws_byte_cursor_from_string(aws_profile_property_get_value(source_profile_property)); + /* reuse profile collection instead of reading it again */ + profile_provider_options.profile_collection_cached = merged_profiles; + sts_options.creds_provider = + s_credentials_provider_new_profile_internal(allocator, &profile_provider_options, source_profiles_table); if (!sts_options.creds_provider) { goto done; @@ -363,9 +385,10 @@ done: return provider; } -struct aws_credentials_provider *aws_credentials_provider_new_profile( +static struct aws_credentials_provider *s_credentials_provider_new_profile_internal( struct aws_allocator *allocator, - const struct aws_credentials_provider_profile_options *options) { + const struct aws_credentials_provider_profile_options *options, + struct aws_hash_table *source_profiles_table) { struct aws_credentials_provider 
*provider = NULL; struct aws_profile_collection *config_profiles = NULL; @@ -374,6 +397,17 @@ struct aws_credentials_provider *aws_credentials_provider_new_profile( struct aws_string *credentials_file_path = NULL; struct aws_string *config_file_path = NULL; struct aws_string *profile_name = NULL; + bool first_profile_in_chain = false; + if (source_profiles_table == NULL) { + source_profiles_table = aws_mem_calloc(allocator, 1, sizeof(struct aws_hash_table)); + first_profile_in_chain = true; + /* source_profiles_table is an hashtable of (char *) -> NULL to detect recursion loop */ + if (aws_hash_table_init( + source_profiles_table, allocator, 3, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL)) { + AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "hash_table_init failed"); + goto on_finished; + } + } profile_name = aws_get_profile_name(allocator, &options->profile_name_override); if (!profile_name) { @@ -428,10 +462,29 @@ struct aws_credentials_provider *aws_credentials_provider_new_profile( goto on_finished; } const struct aws_profile_property *role_arn_property = aws_profile_get_property(profile, s_role_arn_name); + bool profile_contains_access_key = aws_profile_get_property(profile, s_access_key_id_profile_var) != NULL; + bool profile_contains_secret_access_key = + aws_profile_get_property(profile, s_secret_access_key_profile_var) != NULL; + bool profile_contains_credentials = profile_contains_access_key || profile_contains_secret_access_key; + + struct aws_hash_element *element = NULL; + if (aws_hash_table_find(source_profiles_table, (void *)aws_string_c_str(profile_name), &element) == AWS_OP_ERR) { + AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "hash_table_find failed"); + goto on_finished; + } + if (element != NULL) { + /* self-reference chain of length 1 is allowed with static credentials */ + if (aws_hash_table_get_entry_count(source_profiles_table) > 1 || !profile_contains_credentials) { + AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, 
"assumeRole chain contains a circular reference"); + aws_raise_error(AWS_AUTH_PROFILE_STS_CREDENTIALS_PROVIDER_CYCLE_FAILURE); + goto on_finished; + } + } - if (role_arn_property) { + aws_hash_table_put(source_profiles_table, (void *)aws_string_c_str(profile_name), NULL, 0); + if (role_arn_property && (first_profile_in_chain || !profile_contains_credentials)) { provider = s_create_sts_based_provider( - allocator, role_arn_property, profile, credentials_file_path, config_file_path, options); + allocator, role_arn_property, profile, options, merged_profiles, source_profiles_table); } else { provider = s_create_profile_based_provider( allocator, credentials_file_path, config_file_path, profile_name, options->profile_collection_cached); @@ -445,10 +498,20 @@ on_finished: aws_string_destroy(credentials_file_path); aws_string_destroy(config_file_path); aws_string_destroy(profile_name); - + if (first_profile_in_chain) { + aws_hash_table_clean_up(source_profiles_table); + aws_mem_release(allocator, source_profiles_table); + } if (provider) { provider->shutdown_options = options->shutdown_options; } return provider; } + +struct aws_credentials_provider *aws_credentials_provider_new_profile( + struct aws_allocator *allocator, + const struct aws_credentials_provider_profile_options *options) { + + return s_credentials_provider_new_profile_internal(allocator, options, NULL); +} diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c index 33d68f2ce85..0832cdeeee9 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c @@ -36,6 +36,10 @@ # pragma warning(disable : 4232) #endif +static int s_sts_xml_on_AssumeRoleResponse_child(struct aws_xml_node *, void *); +static int s_sts_xml_on_AssumeRoleResult_child(struct aws_xml_node *, void *); +static int s_sts_xml_on_Credentials_child(struct 
aws_xml_node *, void *); + static struct aws_http_header s_host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sts.amazonaws.com"), @@ -50,12 +54,6 @@ static struct aws_byte_cursor s_content_length = AWS_BYTE_CUR_INIT_FROM_STRING_L static struct aws_byte_cursor s_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); static struct aws_byte_cursor s_signing_region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-east-1"); static struct aws_byte_cursor s_service_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sts"); -static struct aws_byte_cursor s_assume_role_root_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("AssumeRoleResponse"); -static struct aws_byte_cursor s_assume_role_result_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("AssumeRoleResult"); -static struct aws_byte_cursor s_assume_role_credentials_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Credentials"); -static struct aws_byte_cursor s_assume_role_session_token_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SessionToken"); -static struct aws_byte_cursor s_assume_role_secret_key_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SecretAccessKey"); -static struct aws_byte_cursor s_assume_role_access_key_id_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("AccessKeyId"); static const int s_max_retries = 8; const uint16_t aws_sts_assume_role_default_duration_secs = 900; @@ -190,7 +188,7 @@ static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aw <AssumeRoleResult> <Credentials> <AccessKeyId>accessKeyId</AccessKeyId> - <SecretKey>secretKey</SecretKey> + <SecretAccessKey>secretKey</SecretAccessKey> <SessionToken>sessionToken</SessionToken> </Credentials> <AssumedRoleUser> @@ -200,56 +198,63 @@ static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aw </AssumeRoleResult> </AssumeRoleResponse> */ -static bool s_on_node_encountered_fn(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) { - - struct 
aws_byte_cursor node_name; - AWS_ZERO_STRUCT(node_name); +static int s_sts_xml_on_root(struct aws_xml_node *node, void *user_data) { + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleResponse")) { + return aws_xml_node_traverse(node, s_sts_xml_on_AssumeRoleResponse_child, user_data); + } + return AWS_OP_SUCCESS; +} - if (aws_xml_node_get_name(node, &node_name)) { - AWS_LOGF_ERROR( - AWS_LS_AUTH_CREDENTIALS_PROVIDER, - "(id=%p): While parsing credentials xml response for sts credentials provider, could not get xml node name " - "for function s_on_node_encountered_fn.", - user_data); - return false; +static int s_sts_xml_on_AssumeRoleResponse_child(struct aws_xml_node *node, void *user_data) { + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleResult")) { + return aws_xml_node_traverse(node, s_sts_xml_on_AssumeRoleResult_child, user_data); } + return AWS_OP_SUCCESS; +} - if (aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_root_name) || - aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_result_name) || - aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_credentials_name)) { - return aws_xml_node_traverse(parser, node, s_on_node_encountered_fn, user_data); +static int s_sts_xml_on_AssumeRoleResult_child(struct aws_xml_node *node, void *user_data) { + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Credentials")) { + return aws_xml_node_traverse(node, s_sts_xml_on_Credentials_child, user_data); } + return AWS_OP_SUCCESS; +} +static int s_sts_xml_on_Credentials_child(struct aws_xml_node *node, void *user_data) { struct sts_creds_provider_user_data *provider_user_data = user_data; + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_byte_cursor credential_data; 
AWS_ZERO_STRUCT(credential_data); - if (aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_access_key_id_name)) { - aws_xml_node_as_body(parser, node, &credential_data); - provider_user_data->access_key_id = - aws_string_new_from_array(provider_user_data->allocator, credential_data.ptr, credential_data.len); - - if (provider_user_data->access_key_id) { - AWS_LOGF_DEBUG( - AWS_LS_AUTH_CREDENTIALS_PROVIDER, - "(id=%p): Read AccessKeyId %s", - (void *)provider_user_data->provider, - aws_string_c_str(provider_user_data->access_key_id)); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AccessKeyId")) { + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; } + provider_user_data->access_key_id = aws_string_new_from_cursor(provider_user_data->allocator, &credential_data); + AWS_LOGF_DEBUG( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): Read AccessKeyId %s", + (void *)provider_user_data->provider, + aws_string_c_str(provider_user_data->access_key_id)); } - if (aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_secret_key_name)) { - aws_xml_node_as_body(parser, node, &credential_data); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SecretAccessKey")) { + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } provider_user_data->secret_access_key = - aws_string_new_from_array(provider_user_data->allocator, credential_data.ptr, credential_data.len); + aws_string_new_from_cursor(provider_user_data->allocator, &credential_data); } - if (aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_session_token_name)) { - aws_xml_node_as_body(parser, node, &credential_data); - provider_user_data->session_token = - aws_string_new_from_array(provider_user_data->allocator, credential_data.ptr, credential_data.len); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SessionToken")) { + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } + provider_user_data->session_token = 
aws_string_new_from_cursor(provider_user_data->allocator, &credential_data); } - return true; + return AWS_OP_SUCCESS; } static void s_start_make_request( @@ -277,7 +282,6 @@ static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_co int http_response_code = 0; struct sts_creds_provider_user_data *provider_user_data = user_data; struct aws_credentials_provider_sts_impl *provider_impl = provider_user_data->provider->impl; - struct aws_xml_parser *xml_parser = NULL; provider_user_data->error_code = error_code; @@ -333,16 +337,6 @@ static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_co goto finish; } - struct aws_xml_parser_options options; - AWS_ZERO_STRUCT(options); - options.doc = aws_byte_cursor_from_buf(&provider_user_data->output_buf); - - xml_parser = aws_xml_parser_new(provider_user_data->provider->allocator, &options); - - if (xml_parser == NULL) { - goto finish; - } - uint64_t now = UINT64_MAX; if (provider_impl->system_clock_fn(&now) != AWS_OP_SUCCESS) { goto finish; @@ -350,13 +344,20 @@ static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_co uint64_t now_seconds = aws_timestamp_convert(now, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); - if (aws_xml_parser_parse(xml_parser, s_on_node_encountered_fn, provider_user_data)) { + struct aws_xml_parser_options options = { + .doc = aws_byte_cursor_from_buf(&provider_user_data->output_buf), + .on_root_encountered = s_sts_xml_on_root, + .user_data = provider_user_data, + }; + if (aws_xml_parse(provider_user_data->provider->allocator, &options)) { provider_user_data->error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): credentials parsing failed with error %s", (void *)provider_user_data->credentials, aws_error_debug_str(provider_user_data->error_code)); + + provider_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_STS_SOURCE_FAILURE; goto finish; } @@ -369,7 +370,10 @@ static void 
s_on_stream_complete_fn(struct aws_http_stream *stream, int error_co provider_user_data->secret_access_key, provider_user_data->session_token, now_seconds + provider_impl->duration_seconds); - } else { + } + + if (provider_user_data->credentials == NULL) { + provider_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_STS_SOURCE_FAILURE; AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): credentials document was corrupted, treating as an error.", @@ -379,11 +383,6 @@ static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_co finish: - if (xml_parser != NULL) { - aws_xml_parser_destroy(xml_parser); - xml_parser = NULL; - } - s_clean_up_user_data(provider_user_data); } @@ -447,6 +446,7 @@ void s_on_signing_complete(struct aws_signing_result *result, int error_code, vo error_code); if (error_code) { + provider_user_data->error_code = error_code; aws_raise_error(error_code); goto error; } diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c index 66d84d006ca..6b79391488d 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c @@ -37,6 +37,10 @@ #define STS_WEB_IDENTITY_MAX_ATTEMPTS 3 static void s_on_connection_manager_shutdown(void *user_data); +static int s_stswebid_error_xml_on_Error_child(struct aws_xml_node *, void *); +static int s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResponse_child(struct aws_xml_node *, void *); +static int s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResult_child(struct aws_xml_node *, void *); +static int s_stswebid_200_xml_on_Credentials_child(struct aws_xml_node *, void *); struct aws_credentials_provider_sts_web_identity_impl { struct aws_http_connection_manager *connection_manager; @@ -187,152 +191,150 @@ Error Response looks like: </Error> */ -static 
bool s_on_error_node_encountered_fn(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) { - - struct aws_byte_cursor node_name; - AWS_ZERO_STRUCT(node_name); - - if (aws_xml_node_get_name(node, &node_name)) { - AWS_LOGF_ERROR( - AWS_LS_AUTH_CREDENTIALS_PROVIDER, - "(id=%p): While parsing xml error response for sts web identity credentials provider, could not get xml " - "node name for function s_on_error_node_encountered_fn.", - user_data); - return false; - } - +static int s_stswebid_error_xml_on_root(struct aws_xml_node *node, void *user_data) { + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Error")) { - return aws_xml_node_traverse(parser, node, s_on_error_node_encountered_fn, user_data); + return aws_xml_node_traverse(node, s_stswebid_error_xml_on_Error_child, user_data); } + return AWS_OP_SUCCESS; +} + +static int s_stswebid_error_xml_on_Error_child(struct aws_xml_node *node, void *user_data) { bool *get_retryable_error = user_data; - struct aws_byte_cursor data_cursor; - AWS_ZERO_STRUCT(data_cursor); + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Code")) { - aws_xml_node_as_body(parser, node, &data_cursor); + + struct aws_byte_cursor data_cursor = {0}; + if (aws_xml_node_as_body(node, &data_cursor)) { + return AWS_OP_ERR; + } + if (aws_byte_cursor_eq_c_str_ignore_case(&data_cursor, "IDPCommunicationError") || aws_byte_cursor_eq_c_str_ignore_case(&data_cursor, "InvalidIdentityToken")) { *get_retryable_error = true; } } - return true; + return AWS_OP_SUCCESS; } static bool s_parse_retryable_error_from_response(struct aws_allocator *allocator, struct aws_byte_buf *response) { - struct aws_xml_parser_options options; - AWS_ZERO_STRUCT(options); - options.doc = aws_byte_cursor_from_buf(response); - - struct aws_xml_parser *xml_parser = aws_xml_parser_new(allocator, &options); - - if (xml_parser == 
NULL) { - AWS_LOGF_ERROR( - AWS_LS_AUTH_CREDENTIALS_PROVIDER, - "Failed to init xml parser for sts web identity credentials provider to parse error information."); - return false; - } bool get_retryable_error = false; - if (aws_xml_parser_parse(xml_parser, s_on_error_node_encountered_fn, &get_retryable_error)) { + struct aws_xml_parser_options options = { + .doc = aws_byte_cursor_from_buf(response), + .on_root_encountered = s_stswebid_error_xml_on_root, + .user_data = &get_retryable_error, + }; + + if (aws_xml_parse(allocator, &options)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse xml error response for sts web identity with error %s", aws_error_str(aws_last_error())); - aws_xml_parser_destroy(xml_parser); return false; } - aws_xml_parser_destroy(xml_parser); return get_retryable_error; } -static bool s_on_creds_node_encountered_fn(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) { +static int s_stswebid_200_xml_on_root(struct aws_xml_node *node, void *user_data) { + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleWithWebIdentityResponse")) { + return aws_xml_node_traverse(node, s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResponse_child, user_data); + } + return AWS_OP_SUCCESS; +} - struct aws_byte_cursor node_name; - AWS_ZERO_STRUCT(node_name); +static int s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResponse_child( + struct aws_xml_node *node, - if (aws_xml_node_get_name(node, &node_name)) { - AWS_LOGF_ERROR( - AWS_LS_AUTH_CREDENTIALS_PROVIDER, - "(id=%p): While parsing credentials xml response for sts web identity credentials provider, could not get " - "xml node name for function s_on_creds_node_encountered_fn.", - user_data); - return false; + void *user_data) { + + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleWithWebIdentityResult")) { + 
return aws_xml_node_traverse(node, s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResult_child, user_data); } + return AWS_OP_SUCCESS; +} + +static int s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResult_child( + struct aws_xml_node *node, - if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleWithWebIdentityResponse") || - aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleWithWebIdentityResult") || - aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Credentials")) { - return aws_xml_node_traverse(parser, node, s_on_creds_node_encountered_fn, user_data); + void *user_data) { + + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Credentials")) { + return aws_xml_node_traverse(node, s_stswebid_200_xml_on_Credentials_child, user_data); } + return AWS_OP_SUCCESS; +} +static int s_stswebid_200_xml_on_Credentials_child(struct aws_xml_node *node, void *user_data) { struct sts_web_identity_user_data *query_user_data = user_data; + + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_byte_cursor credential_data; AWS_ZERO_STRUCT(credential_data); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AccessKeyId")) { - aws_xml_node_as_body(parser, node, &credential_data); - query_user_data->access_key_id = - aws_string_new_from_array(query_user_data->allocator, credential_data.ptr, credential_data.len); + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } + query_user_data->access_key_id = aws_string_new_from_cursor(query_user_data->allocator, &credential_data); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SecretAccessKey")) { - aws_xml_node_as_body(parser, node, &credential_data); - query_user_data->secret_access_key = - aws_string_new_from_array(query_user_data->allocator, credential_data.ptr, credential_data.len); + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } + 
query_user_data->secret_access_key = aws_string_new_from_cursor(query_user_data->allocator, &credential_data); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SessionToken")) { - aws_xml_node_as_body(parser, node, &credential_data); - query_user_data->session_token = - aws_string_new_from_array(query_user_data->allocator, credential_data.ptr, credential_data.len); + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } + query_user_data->session_token = aws_string_new_from_cursor(query_user_data->allocator, &credential_data); } /* As long as we parsed an usable expiration, use it, otherwise use * the existing one: now + 900s, initialized before parsing. */ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Expiration")) { - aws_xml_node_as_body(parser, node, &credential_data); + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } if (credential_data.len != 0) { struct aws_date_time expiration; if (aws_date_time_init_from_str_cursor(&expiration, &credential_data, AWS_DATE_FORMAT_ISO_8601) == AWS_OP_SUCCESS) { query_user_data->expiration_timepoint_in_seconds = (uint64_t)aws_date_time_as_epoch_secs(&expiration); } else { - query_user_data->error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse time string from sts web identity xml response: %s", - aws_error_str(query_user_data->error_code)); + aws_error_str(aws_last_error())); + return AWS_OP_ERR; } } } - return true; + + return AWS_OP_SUCCESS; } static struct aws_credentials *s_parse_credentials_from_response( struct sts_web_identity_user_data *query_user_data, struct aws_byte_buf *response) { - if (!response || response->len == 0) { - return NULL; - } - struct aws_credentials *credentials = NULL; - struct aws_xml_parser_options options; - AWS_ZERO_STRUCT(options); - options.doc = aws_byte_cursor_from_buf(response); - - struct aws_xml_parser *xml_parser = aws_xml_parser_new(query_user_data->allocator, &options); - - 
if (xml_parser == NULL) { - AWS_LOGF_ERROR( - AWS_LS_AUTH_CREDENTIALS_PROVIDER, - "Failed to init xml parser for sts web identity credentials provider to parse error information."); - return NULL; + if (!response || response->len == 0) { + goto on_finish; } + uint64_t now = UINT64_MAX; if (aws_sys_clock_get_ticks(&now) != AWS_OP_SUCCESS) { AWS_LOGF_ERROR( @@ -343,7 +345,12 @@ static struct aws_credentials *s_parse_credentials_from_response( uint64_t now_seconds = aws_timestamp_convert(now, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); query_user_data->expiration_timepoint_in_seconds = now_seconds + STS_WEB_IDENTITY_CREDS_DEFAULT_DURATION_SECONDS; - if (aws_xml_parser_parse(xml_parser, s_on_creds_node_encountered_fn, query_user_data)) { + struct aws_xml_parser_options options = { + .doc = aws_byte_cursor_from_buf(response), + .on_root_encountered = s_stswebid_200_xml_on_root, + .user_data = query_user_data, + }; + if (aws_xml_parse(query_user_data->allocator, &options)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse xml response for sts web identity with error: %s", @@ -352,6 +359,7 @@ static struct aws_credentials *s_parse_credentials_from_response( } if (!query_user_data->access_key_id || !query_user_data->secret_access_key) { + AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "STS web identity not found in XML response."); goto on_finish; } @@ -362,15 +370,16 @@ static struct aws_credentials *s_parse_credentials_from_response( aws_byte_cursor_from_string(query_user_data->session_token), query_user_data->expiration_timepoint_in_seconds); -on_finish: - if (credentials == NULL) { - query_user_data->error_code = aws_last_error(); + AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to create credentials for sts web identity"); + goto on_finish; } - if (xml_parser != NULL) { - aws_xml_parser_destroy(xml_parser); - xml_parser = NULL; +on_finish: + + if (credentials == NULL) { + /* Give a useful error (aws_last_error() might be 
AWS_ERROR_INVALID_ARGUMENT, which isn't too helpful) */ + query_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_STS_WEB_IDENTITY_SOURCE_FAILURE; } return credentials; @@ -739,6 +748,11 @@ static int s_credentials_provider_sts_web_identity_get_credentials_async( struct aws_credentials_provider_sts_web_identity_impl *impl = provider->impl; + AWS_LOGF_DEBUG( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "id=%p: STS_WEB_IDENTITY provider trying to load credentials", + (void *)provider); + struct sts_web_identity_user_data *wrapped_user_data = s_user_data_new(provider, callback, user_data); if (wrapped_user_data == NULL) { goto error; @@ -846,7 +860,6 @@ on_error: return NULL; } -static struct aws_byte_cursor s_default_profile_name_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("default"); static struct aws_byte_cursor s_dot_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("."); static struct aws_byte_cursor s_amazonaws_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".amazonaws.com"); static struct aws_byte_cursor s_cn_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".cn"); @@ -859,7 +872,7 @@ static int s_construct_endpoint( const struct aws_string *service_name) { if (!allocator || !endpoint || !region || !service_name) { - return AWS_ERROR_INVALID_ARGUMENT; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } aws_byte_buf_clean_up(endpoint); @@ -898,7 +911,7 @@ on_error: static int s_generate_uuid_to_buf(struct aws_allocator *allocator, struct aws_byte_buf *dst) { if (!allocator || !dst) { - return AWS_ERROR_INVALID_ARGUMENT; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_uuid uuid; @@ -926,6 +939,23 @@ static int s_generate_uuid_to_buf(struct aws_allocator *allocator, struct aws_by return AWS_OP_SUCCESS; } +static struct aws_string *s_check_or_get_with_env( + struct aws_allocator *allocator, + const struct aws_string *env_key, + struct aws_byte_cursor option) { + + AWS_ASSERT(allocator); + struct aws_string *out = NULL; + + if (option.len) { + out 
= aws_string_new_from_cursor(allocator, &option); + } else { + aws_get_environment_value(allocator, env_key, &out); + } + + return out; +} + static void s_check_or_get_with_profile_config( struct aws_allocator *allocator, const struct aws_profile *profile, @@ -959,7 +989,7 @@ static void s_parameters_destroy(struct sts_web_identity_parameters *parameters) static struct sts_web_identity_parameters *s_parameters_new( struct aws_allocator *allocator, - struct aws_profile_collection *config_profile_collection_cached) { + const struct aws_credentials_provider_sts_web_identity_options *options) { struct sts_web_identity_parameters *parameters = aws_mem_calloc(allocator, 1, sizeof(struct sts_web_identity_parameters)); @@ -969,20 +999,18 @@ static struct sts_web_identity_parameters *s_parameters_new( parameters->allocator = allocator; bool success = false; - struct aws_string *region = NULL; - struct aws_string *role_arn = NULL; - struct aws_string *role_session_name = NULL; - struct aws_string *token_file_path = NULL; - - /* check environment variables */ - aws_get_environment_value(allocator, s_region_env, ®ion); - aws_get_environment_value(allocator, s_role_arn_env, &role_arn); - aws_get_environment_value(allocator, s_role_session_name_env, &role_session_name); - aws_get_environment_value(allocator, s_token_file_path_env, &token_file_path); + struct aws_string *region = s_check_or_get_with_env(allocator, s_region_env, options->region); + struct aws_string *role_arn = s_check_or_get_with_env(allocator, s_role_arn_env, options->role_arn); + struct aws_string *role_session_name = + s_check_or_get_with_env(allocator, s_role_session_name_env, options->role_session_name); + struct aws_string *token_file_path = + s_check_or_get_with_env(allocator, s_token_file_path_env, options->token_file_path); + ; /** * check config profile if either region, role_arn or token_file_path or role_session_name is not resolved from - * environment variable. 
Role session name can also be generated by us using uuid if not found from both sources. + * environment variable. Role session name can also be generated by us using uuid if not found from both + * sources. */ struct aws_profile_collection *config_profile = NULL; struct aws_string *profile_name = NULL; @@ -990,9 +1018,9 @@ static struct sts_web_identity_parameters *s_parameters_new( bool get_all_parameters = (region && region->len && role_arn && role_arn->len && token_file_path && token_file_path->len); if (!get_all_parameters) { - if (config_profile_collection_cached) { + if (options->config_profile_collection_cached) { /* Use cached profile collection */ - config_profile = aws_profile_collection_acquire(config_profile_collection_cached); + config_profile = aws_profile_collection_acquire(options->config_profile_collection_cached); } else { /* Load profile collection from files */ config_profile = s_load_profile(allocator); @@ -1001,10 +1029,8 @@ static struct sts_web_identity_parameters *s_parameters_new( } } - profile_name = aws_get_profile_name(allocator, &s_default_profile_name_cursor); - if (profile_name) { - profile = aws_profile_collection_get_profile(config_profile, profile_name); - } + profile_name = aws_get_profile_name(allocator, &options->profile_name_override); + profile = aws_profile_collection_get_profile(config_profile, profile_name); if (!profile) { AWS_LOGF_ERROR( @@ -1080,8 +1106,7 @@ struct aws_credentials_provider *aws_credentials_provider_new_sts_web_identity( struct aws_allocator *allocator, const struct aws_credentials_provider_sts_web_identity_options *options) { - struct sts_web_identity_parameters *parameters = - s_parameters_new(allocator, options->config_profile_collection_cached); + struct sts_web_identity_parameters *parameters = s_parameters_new(allocator, options); if (!parameters) { return NULL; } diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c 
b/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c index a1ee268c379..47ca8863f63 100644 --- a/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c +++ b/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c @@ -30,6 +30,7 @@ static struct aws_auth_http_system_vtable s_default_function_table = { .aws_http_stream_get_incoming_response_status = aws_http_stream_get_incoming_response_status, .aws_http_stream_release = aws_http_stream_release, .aws_http_connection_close = aws_http_connection_close, + .aws_high_res_clock_get_ticks = aws_high_res_clock_get_ticks, }; const struct aws_auth_http_system_vtable *g_aws_credentials_provider_http_function_table = &s_default_function_table; diff --git a/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c b/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c index 302ce9d3a1a..8ce78504df6 100644 --- a/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c +++ b/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c @@ -30,7 +30,7 @@ static int s_aws_signable_chunk_get_property( if (aws_string_eq(name, g_aws_previous_signature_property_name)) { *out_value = aws_byte_cursor_from_string(impl->previous_signature); } else { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; @@ -44,7 +44,7 @@ static int s_aws_signable_chunk_get_property_list( (void)name; (void)out_list; - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } static int s_aws_signable_chunk_get_payload_stream( diff --git a/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c b/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c index 1236026052f..59bda9642f4 100644 --- a/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c +++ b/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c @@ -32,7 +32,7 @@ static int s_aws_signable_http_request_get_property( } else if (aws_string_eq(name, 
g_aws_http_method_property_name)) { aws_http_message_get_request_method(impl->request, out_value); } else { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; @@ -50,7 +50,7 @@ static int s_aws_signable_http_request_get_property_list( if (aws_string_eq(name, g_aws_http_headers_property_list_name)) { *out_list = &impl->headers; } else { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } return AWS_OP_SUCCESS; @@ -76,6 +76,7 @@ static void s_aws_signable_http_request_destroy(struct aws_signable *signable) { return; } + aws_http_message_release(impl->request); aws_array_list_clean_up(&impl->headers); aws_mem_release(signable->allocator, signable); } @@ -118,7 +119,7 @@ struct aws_signable *aws_signable_new_http_request(struct aws_allocator *allocat aws_array_list_push_back(&impl->headers, &property); } - impl->request = request; + impl->request = aws_http_message_acquire(request); return signable; diff --git a/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c b/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c index b5742643de5..a426d185320 100644 --- a/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c +++ b/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c @@ -28,7 +28,7 @@ static int s_aws_signable_trailing_headers_get_property( if (aws_string_eq(name, g_aws_previous_signature_property_name)) { *out_value = aws_byte_cursor_from_string(impl->previous_signature); } else { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } @@ -48,7 +48,7 @@ static int s_aws_signable_trailing_headers_get_property_list( if (aws_string_eq(name, g_aws_http_headers_property_list_name)) { *out_list = &impl->headers; } else { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } return AWS_OP_SUCCESS; diff --git a/contrib/restricted/aws/aws-c-auth/source/signing_config.c 
b/contrib/restricted/aws/aws-c-auth/source/signing_config.c index c0b6b0f2dd3..44411d75b7a 100644 --- a/contrib/restricted/aws/aws-c-auth/source/signing_config.c +++ b/contrib/restricted/aws/aws-c-auth/source/signing_config.c @@ -36,6 +36,9 @@ const char *aws_signing_algorithm_to_string(enum aws_signing_algorithm algorithm case AWS_SIGNING_ALGORITHM_V4: return "SigV4"; + case AWS_SIGNING_ALGORITHM_V4_S3EXPRESS: + return "SigV4S3Express"; + case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC: return "SigV4Asymmetric"; @@ -114,6 +117,28 @@ int aws_validate_aws_signing_config_aws(const struct aws_signing_config_aws *con } } break; + case AWS_SIGNING_ALGORITHM_V4_S3EXPRESS: + if (config->credentials == NULL && config->credentials_provider == NULL) { + AWS_LOGF_ERROR( + AWS_LS_AUTH_SIGNING, + "(id=%p) Sigv4 S3 Express signing config is missing a credentials provider or credentials", + (void *)config); + return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); + } + + if (config->credentials != NULL) { + if (aws_credentials_is_anonymous(config->credentials) || + aws_credentials_get_access_key_id(config->credentials).len == 0 || + aws_credentials_get_secret_access_key(config->credentials).len == 0 || + aws_credentials_get_session_token(config->credentials).len == 0) { + AWS_LOGF_ERROR( + AWS_LS_AUTH_SIGNING, + "(id=%p) Sigv4 S3 Express signing configured with invalid credentials", + (void *)config); + return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CREDENTIALS); + } + } + break; case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC: if (config->credentials == NULL && config->credentials_provider == NULL) { diff --git a/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c b/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c index 6e73f2e7f9e..29dca23cee5 100644 --- a/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c +++ b/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c @@ -154,7 +154,7 @@ int aws_apply_signing_result_to_http_request( } if 
(source_header.name == NULL || source_header.value == NULL) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_http_header dest_header = { diff --git a/contrib/restricted/aws/aws-c-auth/ya.make b/contrib/restricted/aws/aws-c-auth/ya.make index ef7a8fc1530..a4d99235ebd 100644 --- a/contrib/restricted/aws/aws-c-auth/ya.make +++ b/contrib/restricted/aws/aws-c-auth/ya.make @@ -1,4 +1,4 @@ -# Generated by devtools/yamaker from nixpkgs 23.05. +# Generated by devtools/yamaker from nixpkgs 24.05. LIBRARY() @@ -6,9 +6,9 @@ LICENSE(Apache-2.0) LICENSE_TEXTS(.yandex_meta/licenses.list.txt) -VERSION(0.6.27) +VERSION(0.7.18) -ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-auth/archive/v0.6.27.tar.gz) +ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-auth/archive/v0.7.18.tar.gz) PEERDIR( contrib/restricted/aws/aws-c-cal @@ -37,18 +37,7 @@ CFLAGS( -DAWS_USE_EPOLL -DCJSON_HIDE_SYMBOLS -DHAVE_SYSCONF - -DS2N_CLONE_SUPPORTED - -DS2N_CPUID_AVAILABLE - -DS2N_FALL_THROUGH_SUPPORTED - -DS2N_FEATURES_AVAILABLE - -DS2N_KYBER512R3_AVX2_BMI2 - -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH - -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX - -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4 - -DS2N_MADVISE_SUPPORTED - -DS2N_PLATFORM_SUPPORTS_KTLS - -DS2N_STACKTRACE - -DS2N___RESTRICT__SUPPORTED + -DINTEL_NO_ITTNOTIFY_API ) IF (OS_WINDOWS) diff --git a/contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py b/contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py index 54f4b1b1833..a80cbc02913 100644 --- a/contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py +++ b/contrib/restricted/aws/aws-c-cal/.yandex_meta/__init__.py @@ -1,4 +1,5 @@ from devtools.yamaker.fileutil import files +from devtools.yamaker import pathutil from devtools.yamaker.modules import Linkable, Switch, Words from devtools.yamaker.project import CMakeNinjaNixProject @@ -7,7 +8,7 @@ def post_install(self): m = self.yamakes["."] # Support Darwin. 
- linux_srcs = files(self.srcdir + "/source/unix/", rel=self.srcdir) + linux_srcs = files(self.srcdir + "/source/unix/", rel=self.srcdir, test=pathutil.is_source) darwin_srcs = files(self.srcdir + "/source/darwin/", rel=self.srcdir) windows_srcs = files(self.srcdir + "/source/windows/", rel=self.srcdir) m.SRCS -= set(linux_srcs) @@ -39,6 +40,9 @@ aws_c_cal = CMakeNinjaNixProject( "source/darwin/", "source/windows/", ], + disable_includes=[ + "openssl/evp_errors.h", + ], ignore_targets=["sha256_profile"], post_install=post_install, ) diff --git a/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report index 62a09c2294d..e9462dad347 100644 --- a/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report +++ b/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.copyrights.report @@ -48,7 +48,10 @@ BELONGS ya.make include/aws/cal/hmac.h [4:4] include/aws/cal/private/der.h [4:4] include/aws/cal/private/ecc.h [4:4] + include/aws/cal/private/opensslcrypto_common.h [5:5] + include/aws/cal/private/rsa.h [4:4] include/aws/cal/private/symmetric_cipher_priv.h [4:4] + include/aws/cal/rsa.h [4:4] include/aws/cal/symmetric_cipher.h [4:4] source/cal.c [2:2] source/darwin/commoncrypto_aes.c [2:2] @@ -58,13 +61,16 @@ BELONGS ya.make source/darwin/commoncrypto_sha1.c [2:2] source/darwin/commoncrypto_sha256.c [2:2] source/darwin/securityframework_ecc.c [2:2] + source/darwin/securityframework_rsa.c [2:2] source/der.c [2:2] source/ecc.c [2:2] source/hash.c [2:2] source/hmac.c [2:2] + source/rsa.c [2:2] source/symmetric_cipher.c [2:2] source/unix/openssl_aes.c [2:2] source/unix/openssl_platform_init.c [2:2] + source/unix/openssl_rsa.c [2:2] source/unix/opensslcrypto_ecc.c [2:2] source/unix/opensslcrypto_hash.c [2:2] source/unix/opensslcrypto_hmac.c [2:2] @@ -73,6 +79,7 @@ BELONGS ya.make source/windows/bcrypt_hash.c [2:2] source/windows/bcrypt_hmac.c [2:2] 
source/windows/bcrypt_platform_init.c [2:2] + source/windows/bcrypt_rsa.c [2:2] KEEP COPYRIGHT_SERVICE_LABEL 9b3428451fa759287a2e04cd16a4619c BELONGS ya.make diff --git a/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report index ad73f02034c..a69318d532a 100644 --- a/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report +++ b/contrib/restricted/aws/aws-c-cal/.yandex_meta/devtools.licenses.report @@ -109,7 +109,10 @@ BELONGS ya.make include/aws/cal/hmac.h [5:5] include/aws/cal/private/der.h [5:5] include/aws/cal/private/ecc.h [5:5] + include/aws/cal/private/opensslcrypto_common.h [6:6] + include/aws/cal/private/rsa.h [5:5] include/aws/cal/private/symmetric_cipher_priv.h [5:5] + include/aws/cal/rsa.h [5:5] include/aws/cal/symmetric_cipher.h [5:5] source/cal.c [3:3] source/darwin/commoncrypto_aes.c [3:3] @@ -119,13 +122,16 @@ BELONGS ya.make source/darwin/commoncrypto_sha1.c [3:3] source/darwin/commoncrypto_sha256.c [3:3] source/darwin/securityframework_ecc.c [3:3] + source/darwin/securityframework_rsa.c [3:3] source/der.c [3:3] source/ecc.c [3:3] source/hash.c [3:3] source/hmac.c [3:3] + source/rsa.c [3:3] source/symmetric_cipher.c [3:3] source/unix/openssl_aes.c [3:3] source/unix/openssl_platform_init.c [3:3] + source/unix/openssl_rsa.c [3:3] source/unix/opensslcrypto_ecc.c [3:3] source/unix/opensslcrypto_hash.c [3:3] source/unix/opensslcrypto_hmac.c [3:3] @@ -134,6 +140,7 @@ BELONGS ya.make source/windows/bcrypt_hash.c [3:3] source/windows/bcrypt_hmac.c [3:3] source/windows/bcrypt_platform_init.c [3:3] + source/windows/bcrypt_rsa.c [3:3] SKIP LicenseRef-scancode-generic-cla ee24fdc60600747c7d12c32055b0011d BELONGS ya.make diff --git a/contrib/restricted/aws/aws-c-cal/.yandex_meta/override.nix b/contrib/restricted/aws/aws-c-cal/.yandex_meta/override.nix index a597ecae165..3cba6d88f0b 100644 --- a/contrib/restricted/aws/aws-c-cal/.yandex_meta/override.nix +++ 
b/contrib/restricted/aws/aws-c-cal/.yandex_meta/override.nix @@ -1,10 +1,10 @@ pkgs: attrs: with pkgs; with attrs; rec { - version = "0.5.26"; + version = "0.6.12"; src = fetchFromGitHub { owner = "awslabs"; repo = "aws-c-cal"; rev = "v${version}"; - hash = "sha256-X325dMH3mZBvRqZ540ZsmcJ/N5PbDxzGDf5Gi+XXP0c="; + hash = "sha256-aegK01wYdOc6RGNVf/dZKn1HkqQr+yEblcu6hnlMZE4="; }; } diff --git a/contrib/restricted/aws/aws-c-cal/README.md b/contrib/restricted/aws/aws-c-cal/README.md index b50c8d4aa41..9522401c508 100644 --- a/contrib/restricted/aws/aws-c-cal/README.md +++ b/contrib/restricted/aws/aws-c-cal/README.md @@ -19,7 +19,8 @@ CMake 3.0+ is required to build. #### Linux-Only Dependencies -If you are building on Linux, you will need to build aws-lc first. +If you are building on Linux, there are several options for crypto libraries. +Preferred choice is aws-lc, that can be build as follows. ``` git clone [email protected]:awslabs/aws-lc.git @@ -27,6 +28,11 @@ cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path> cmake --build aws-lc/build --target install ``` +Alternatively, OpenSSL versions 1.0.2 or 1.1.1 or BoringSSL at commit 9939e14 +(other commits are not tested and not guaranteed to work) can be used. To build +against OpenSSL or BoringSSL specify -DUSE_OPENSSL=ON. 
Typical OpenSSL flags can +be used to help project locate artifacts (-DLibCrypto_INCLUDE_DIR and -DLibCrypto_STATIC_LIBRARY) + #### Building aws-c-cal and Remaining Dependencies ``` diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h index 2c6c909838c..3cb6cf07bc1 100644 --- a/contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/cal.h @@ -10,6 +10,8 @@ #include <aws/cal/exports.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_allocator; #define AWS_C_CAL_PACKAGE_ID 7 @@ -24,6 +26,9 @@ enum aws_cal_errors { AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM, AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, + AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT, + AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT, + AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED, AWS_ERROR_CAL_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_CAL_PACKAGE_ID) }; @@ -34,6 +39,7 @@ enum aws_cal_log_subject { AWS_LS_CAL_HMAC, AWS_LS_CAL_DER, AWS_LS_CAL_LIBCRYPTO_RESOLVE, + AWS_LS_CAL_RSA, AWS_LS_CAL_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_CAL_PACKAGE_ID) }; @@ -43,6 +49,12 @@ AWS_EXTERN_C_BEGIN AWS_CAL_API void aws_cal_library_init(struct aws_allocator *allocator); AWS_CAL_API void aws_cal_library_clean_up(void); +/* + * Every CRT thread that might invoke aws-lc functionality should call this as part of the thread at_exit process + */ +AWS_CAL_API void aws_cal_thread_clean_up(void); + AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_CAL_H */ diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/ecc.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/ecc.h index 660c26d79bd..4941fa93e92 100644 --- a/contrib/restricted/aws/aws-c-cal/include/aws/cal/ecc.h +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/ecc.h @@ -10,6 +10,8 @@ #include <aws/common/byte_buf.h> #include <aws/common/common.h> +AWS_PUSH_SANE_WARNING_LEVEL + enum 
aws_ecc_curve_name { AWS_CAL_ECDSA_P256, AWS_CAL_ECDSA_P384, @@ -62,7 +64,7 @@ AWS_CAL_API void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); AWS_CAL_API void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); /** - * Creates a Eliptic Curve private key that can be used for signing. + * Creates an Elliptic Curve private key that can be used for signing. * Returns a new instance of aws_ecc_key_pair if the key was successfully built. * Otherwise returns NULL. Note: priv_key::len must match the appropriate length * for the selected curve_name. @@ -74,9 +76,14 @@ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( #if !defined(AWS_OS_IOS) /** - * Creates a Eliptic Curve public/private key pair that can be used for signing and verifying. + * Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. * Returns a new instance of aws_ecc_key_pair if the key was successfully built. * Otherwise returns NULL. + * Note: On Apple platforms this function is only supported on MacOS. This is + * due to usage of SecItemExport, which is only available on MacOS 10.7+ + * (yes, MacOS only and no other Apple platforms). There are alternatives for + * ios and other platforms, but they are ugly to use. Hence for now it only + * supports this call on MacOS. */ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, @@ -84,7 +91,7 @@ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( #endif /* !AWS_OS_IOS */ /** - * Creates a Eliptic Curve public key that can be used for verifying. + * Creates an Elliptic Curve public key that can be used for verifying. * Returns a new instance of aws_ecc_key_pair if the key was successfully built. * Otherwise returns NULL. Note: public_key_x::len and public_key_y::len must * match the appropriate length for the selected curve_name. 
@@ -96,7 +103,7 @@ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( const struct aws_byte_cursor *public_key_y); /** - * Creates a Eliptic Curve public/private key pair from a DER encoded key pair. + * Creates an Elliptic Curve public/private key pair from a DER encoded key pair. * Returns a new instance of aws_ecc_key_pair if the key was successfully built. * Otherwise returns NULL. Whether or not signing or verification can be perform depends * on if encoded_keys is a public/private pair or a public key. @@ -173,5 +180,6 @@ AWS_CAL_API void aws_ecc_key_pair_get_private_key( AWS_CAL_API size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_ECC_H */ diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/hash.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/hash.h index 16be7210442..97d6e3c3a2c 100644 --- a/contrib/restricted/aws/aws-c-cal/include/aws/cal/hash.h +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/hash.h @@ -9,6 +9,8 @@ #include <aws/common/byte_buf.h> #include <aws/common/common.h> +AWS_PUSH_SANE_WARNING_LEVEL + #define AWS_SHA256_LEN 32 #define AWS_SHA1_LEN 20 #define AWS_MD5_LEN 16 @@ -132,5 +134,6 @@ AWS_CAL_API void aws_set_sha256_new_fn(aws_hash_new_fn *fn); AWS_CAL_API void aws_set_sha1_new_fn(aws_hash_new_fn *fn); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_HASH_H_ */ diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/hmac.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/hmac.h index 6caf5cd7852..d02d7013978 100644 --- a/contrib/restricted/aws/aws-c-cal/include/aws/cal/hmac.h +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/hmac.h @@ -9,6 +9,8 @@ #include <aws/common/byte_buf.h> #include <aws/common/common.h> +AWS_PUSH_SANE_WARNING_LEVEL + #define AWS_SHA256_HMAC_LEN 32 struct aws_hmac; @@ -80,5 +82,6 @@ AWS_CAL_API int aws_sha256_hmac_compute( AWS_CAL_API void 
aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_HASH_H_ */ diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/der.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/der.h index 3486e3f4764..cbec136bdd3 100644 --- a/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/der.h +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/der.h @@ -13,6 +13,23 @@ struct aws_der_encoder; struct aws_der_decoder; +/* + * Note: encoder/decoder only supports unsigned representations of integers and usage + * of signed integers might lead to unexpected results. + * Context: DER spec requires ints to be stored in big endian format with MSB + * representing signedness. To disambiguate between negative number and big + * positive number, null byte can be added in front of positive number. DER spec + * requires representation to be the shortest possible one. + * During encoding aws_der_encoder_write_unsigned_integer assumes that cursor + * points to a positive number and will prepend 0 if needed by DER spec to + * indicate its positive number. Encoder does not support writing negative numbers. + * Decoder aws_der_encoder_write_unsigned_integer will strip any leading 0 as + * needed and will error out if der contains negative number. + * Take special care when integrating with 3p libraries cause they might expect + * different format. Ex. this format matches what openssl calls bin format + * (BN_bin2bn) and might not work as expected with openssl mpi format. 
+ */ + enum aws_der_type { /* Primitives */ AWS_DER_BOOLEAN = 0x01, @@ -69,7 +86,7 @@ AWS_CAL_API void aws_der_encoder_destroy(struct aws_der_encoder *encoder); * @param integer A cursor pointing to the integer's memory * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ -AWS_CAL_API int aws_der_encoder_write_integer(struct aws_der_encoder *encoder, struct aws_byte_cursor integer); +AWS_CAL_API int aws_der_encoder_write_unsigned_integer(struct aws_der_encoder *encoder, struct aws_byte_cursor integer); /** * Writes a boolean to the DER stream * @param encoder The encoder to use @@ -195,7 +212,7 @@ AWS_CAL_API int aws_der_decoder_tlv_string(struct aws_der_decoder *decoder, stru * @param integer The buffer to store the integer into * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ -AWS_CAL_API int aws_der_decoder_tlv_integer(struct aws_der_decoder *decoder, struct aws_byte_cursor *integer); +AWS_CAL_API int aws_der_decoder_tlv_unsigned_integer(struct aws_der_decoder *decoder, struct aws_byte_cursor *integer); /** * Extracts the current TLV BOOLEAN value (BOOLEAN) diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/opensslcrypto_common.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/opensslcrypto_common.h index 1c80b9d5133..79b0fafb4cd 100644 --- a/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/opensslcrypto_common.h +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/opensslcrypto_common.h @@ -1,28 +1,62 @@ #ifndef AWS_C_CAL_OPENSSLCRYPTO_COMMON_H #define AWS_C_CAL_OPENSSLCRYPTO_COMMON_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#define OPENSSL_SUPPRESS_DEPRECATED #include <openssl/crypto.h> #include <openssl/evp.h> #include <openssl/hmac.h> +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) +# define OPENSSL_IS_OPENSSL +#endif + +/* + * There are some differences in function definitions between OpenSSL 1.0.2 and + * 1.1.1, aws-lc and boringssl. + * This file defines some common wrappers that abstract away those differences. + * For OpenSSL we currently support building against 1.0.2 or 1.1.1, and can + * detect version used at runtime and dyn load those symbols correctly. + * For OpenSSL 3.0 the code will compile and run, but largely because we disable + * deprecation warnings. + * For aws-lc and boringssl code must be compiled against the same version as + * the runtime lib. + */ + typedef HMAC_CTX *(*hmac_ctx_new)(void); -typedef void (*hmac_ctx_reset)(HMAC_CTX *); typedef void (*hmac_ctx_free)(HMAC_CTX *); typedef void (*hmac_ctx_init)(HMAC_CTX *); -typedef int (*hmac_ctx_init_ex)(HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *); typedef void (*hmac_ctx_clean_up)(HMAC_CTX *); -typedef int (*hmac_ctx_update)(HMAC_CTX *, const unsigned char *, size_t); -typedef int (*hmac_ctx_final)(HMAC_CTX *, unsigned char *, unsigned int *); +typedef int (*hmac_init_ex)(HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *); +typedef int (*hmac_update)(HMAC_CTX *, const unsigned char *, size_t); +typedef int (*hmac_final)(HMAC_CTX *, unsigned char *, unsigned int *); + +/* C standard does not have concept of generic function pointer, but it does +guarantee that function pointer casts will roundtrip when casting to any type and +then back. Use void *(void) as a generic function pointer. 
*/ +typedef void (*crypto_generic_fn_ptr)(void); struct openssl_hmac_ctx_table { hmac_ctx_new new_fn; hmac_ctx_free free_fn; hmac_ctx_init init_fn; - hmac_ctx_init_ex init_ex_fn; hmac_ctx_clean_up clean_up_fn; - hmac_ctx_update update_fn; - hmac_ctx_final final_fn; - hmac_ctx_reset reset_fn; + hmac_init_ex init_ex_fn; + hmac_update update_fn; + hmac_final final_fn; + + /* There is slight variance between the crypto interfaces. + Note that function pointer casting is undefined behavior. + To workaround the issue, use generic pointer for crypto and let delegate + function cast it back to correct type. + Do not use following fields manually. */ + struct { + crypto_generic_fn_ptr init_ex_fn; + } impl; }; extern struct openssl_hmac_ctx_table *g_aws_openssl_hmac_ctx_table; diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/rsa.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/rsa.h new file mode 100644 index 00000000000..37a7a3c293e --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/private/rsa.h @@ -0,0 +1,118 @@ +#ifndef AWS_C_CAL_PRIVATE_RSA_H +#define AWS_C_CAL_PRIVATE_RSA_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include <aws/cal/rsa.h> + +#include <aws/common/byte_buf.h> +#include <aws/common/ref_count.h> + +struct aws_rsa_key_pair; +struct aws_der_decoder; + +struct aws_rsa_key_vtable { + int (*encrypt)( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out); + int (*decrypt)( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out); + + int (*sign)( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out); + + int (*verify)( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature); +}; + +struct aws_rsa_key_pair { + struct aws_allocator *allocator; + struct aws_rsa_key_vtable *vtable; + struct aws_ref_count ref_count; + + size_t key_size_in_bits; + struct aws_byte_buf priv; + struct aws_byte_buf pub; + + void *impl; +}; + +void aws_rsa_key_pair_base_clean_up(struct aws_rsa_key_pair *key_pair); + +/* + * RSAPrivateKey as defined in RFC 8017 (aka PKCS1 format): + * version Version, + * modulus INTEGER, -- n + * publicExponent INTEGER, -- e + * privateExponent INTEGER, -- d + * prime1 INTEGER, -- p + * prime2 INTEGER, -- q + * exponent1 INTEGER, -- d mod (p-1) + * exponent2 INTEGER, -- d mod (q-1) + * coefficient INTEGER, -- (inverse of q) mod p + * otherPrimeInfos OtherPrimeInfos OPTIONAL + * Note: otherPrimeInfos is used for >2 primes RSA cases, which are not very + * common and currently not supported by CRT. Version == 0 indicates 2 prime + * case and version == 1 indicates >2 prime case, hence in practice it will + * always be 0. + */ +struct aws_rsa_private_key_pkcs1 { + /* + * Note: all cursors here point to bignum data for underlying RSA numbers. 
+ * Struct itself does not own the data and points to where ever the data was + * decoded from. + */ + int version; + struct aws_byte_cursor modulus; + struct aws_byte_cursor publicExponent; + struct aws_byte_cursor privateExponent; + struct aws_byte_cursor prime1; + struct aws_byte_cursor prime2; + struct aws_byte_cursor exponent1; + struct aws_byte_cursor exponent2; + struct aws_byte_cursor coefficient; +}; + +AWS_CAL_API int aws_der_decoder_load_private_rsa_pkcs1( + struct aws_der_decoder *decoder, + struct aws_rsa_private_key_pkcs1 *out); + +/* +* RSAPublicKey as defined in RFC 8017 (aka PKCS1 format): + modulus INTEGER, -- n + publicExponent INTEGER -- e +*/ +struct aws_rsa_public_key_pkcs1 { + /* + * Note: all cursors here point to bignum data for underlying RSA numbers. + * Struct itself does not own the data and points to where ever the data was + * decoded from. + */ + struct aws_byte_cursor modulus; + struct aws_byte_cursor publicExponent; +}; + +AWS_CAL_API int aws_der_decoder_load_public_rsa_pkcs1( + struct aws_der_decoder *decoder, + struct aws_rsa_public_key_pkcs1 *out); + +/* + * Returns AWS_OP_SUCCESS if key size is supported and raises + * AWS_ERROR_INVALID_ARGUMENT otherwise. + */ +int is_valid_rsa_key_size(size_t key_size_in_bits); + +#endif /* AWS_C_CAL_PRIVATE_RSA_H */ diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/rsa.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/rsa.h new file mode 100644 index 00000000000..1df34f11148 --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/rsa.h @@ -0,0 +1,165 @@ +#ifndef AWS_CAL_RSA_H +#define AWS_CAL_RSA_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/cal/cal.h> +#include <aws/common/byte_buf.h> + +AWS_PUSH_SANE_WARNING_LEVEL + +struct aws_rsa_key_pair; + +enum aws_rsa_encryption_algorithm { + AWS_CAL_RSA_ENCRYPTION_PKCS1_5, + AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256, + AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512, +}; + +enum aws_rsa_signature_algorithm { + AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, + AWS_CAL_RSA_SIGNATURE_PSS_SHA256, +}; + +/* + * Note: prefer using standard key sizes - 1024, 2048, 4096. + * Other key sizes will work, but which key sizes are supported may vary by + * platform. Typically, multiples of 64 should work on all platforms. + */ +enum { + AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024, + AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096, +}; + +AWS_EXTERN_C_BEGIN + +/** + * Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). + * Returns a new instance of aws_rsa_key_pair if the key was successfully built. + * Otherwise returns NULL. + */ +AWS_CAL_API struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( + struct aws_allocator *allocator, + struct aws_byte_cursor key); + +/** + * Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). + * Returns a new instance of aws_rsa_key_pair if the key was successfully built. + * Otherwise returns NULL. + */ +AWS_CAL_API struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( + struct aws_allocator *allocator, + struct aws_byte_cursor key); + +/** + * Adds one to an RSA key pair's ref count. + * Returns key_pair pointer. + */ +AWS_CAL_API struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); + +/** + * Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. + * Always returns NULL. + */ +AWS_CAL_API struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); + +/** + * Max plaintext size that can be encrypted by the key (i.e. 
max data size + * supported by the key - bytes needed for padding). + */ +AWS_CAL_API size_t aws_rsa_key_pair_max_encrypt_plaintext_size( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm); + +/* + * Uses the key_pair's private key to encrypt the plaintext. The output will be + * in out. out must be large enough to to hold the ciphertext. Check + * aws_rsa_key_pair_block_length() for output upper bound. + */ +AWS_CAL_API int aws_rsa_key_pair_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out); + +/* + * Uses the key_pair's private key to decrypt the ciphertext. The output will be + * in out. out must be large enough to to hold the ciphertext. Check + * aws_rsa_key_pair_block_length() for output upper bound. + */ +AWS_CAL_API int aws_rsa_key_pair_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out); + +/* + * Max size for a block supported by a given key pair. + */ +AWS_CAL_API size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); + +/** + * Uses the key_pair's private key to sign message. The output will be in out. out must be large enough + * to hold the signature. Check aws_rsa_key_pair_signature_length() for the appropriate size. + * + * It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually + * something like a SHA256. + */ +AWS_CAL_API int aws_rsa_key_pair_sign_message( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out); + +/** + * Uses the key_pair's public key to verify signature of message. + * + * It is the callers job to make sure message is the appropriate cryptographic digest for this operation. 
It's usually + * something like a SHA256. + * + * returns AWS_OP_SUCCESS if the signature is valid. + * raises AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED if signature validation failed + */ +AWS_CAL_API int aws_rsa_key_pair_verify_signature( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature); + +/* + * Max size for a signature supported by a given key pair. + */ +AWS_CAL_API size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); + +enum aws_rsa_key_export_format { + AWS_CAL_RSA_KEY_EXPORT_PKCS1, +}; + +/* + * Get public key for the key pair. + * Inits out to a copy of key. + * Any encoding on top of that (ex. b64) is left up to user. + * Note: this function is currently not supported on windows for generated keys. + */ +AWS_CAL_API int aws_rsa_key_pair_get_public_key( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_key_export_format format, + struct aws_byte_buf *out); + +/* + * Get private key for the key pair. + * Inits out to a copy of key. + * Any encoding on top of that (ex. b64) is left up to user. + * Note: this function is currently not supported on Windows for generated keys. 
+ */ +AWS_CAL_API int aws_rsa_key_pair_get_private_key( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_key_export_format format, + struct aws_byte_buf *out); + +AWS_EXTERN_C_END + +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_CAL_RSA_H */ diff --git a/contrib/restricted/aws/aws-c-cal/include/aws/cal/symmetric_cipher.h b/contrib/restricted/aws/aws-c-cal/include/aws/cal/symmetric_cipher.h index 59f44831d89..c01eab2f0dc 100644 --- a/contrib/restricted/aws/aws-c-cal/include/aws/cal/symmetric_cipher.h +++ b/contrib/restricted/aws/aws-c-cal/include/aws/cal/symmetric_cipher.h @@ -7,6 +7,8 @@ #include <aws/cal/cal.h> #include <aws/common/byte_buf.h> +AWS_PUSH_SANE_WARNING_LEVEL + #define AWS_AES_256_CIPHER_BLOCK_SIZE 16 #define AWS_AES_256_KEY_BIT_LEN 256 #define AWS_AES_256_KEY_BYTE_LEN (AWS_AES_256_KEY_BIT_LEN / 8) @@ -204,7 +206,7 @@ AWS_CAL_API int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); /** - * Gets the original intialization vector as a cursor. + * Gets the original initialization vector as a cursor. * The memory in this cursor is unsafe as it refers to the internal buffer. * This was done because the use case doesn't require fetching these during an * encryption or decryption operation and it dramatically simplifies the API. 
@@ -235,4 +237,6 @@ AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws AWS_CAL_API bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL + #endif /* AWS_CAL_SYMMETRIC_CIPHER_H */ diff --git a/contrib/restricted/aws/aws-c-cal/patches/01-dlopen-musl.patch b/contrib/restricted/aws/aws-c-cal/patches/01-dlopen-musl.patch deleted file mode 100644 index d427d6f4403..00000000000 --- a/contrib/restricted/aws/aws-c-cal/patches/01-dlopen-musl.patch +++ /dev/null @@ -1,14 +0,0 @@ ---- contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c (index) -+++ contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c (working tree) -@@ -489,7 +489,11 @@ static enum aws_libcrypto_version s_resolve_libcrypto(void) { - /* Try to auto-resolve against what's linked in/process space */ - AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "searching process and loaded modules"); - void *process = dlopen(NULL, RTLD_NOW); -+#if 0 -+ // dlopen is not supported in musl. It's ok to pass NULL to s_resolve_libcrypto_symbols, -+ // as dlsym handles it well according to man. 
- AWS_FATAL_ASSERT(process && "Unable to load symbols from process space"); -+#endif - enum aws_libcrypto_version result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_LC, process); - if (result == AWS_LIBCRYPTO_NONE) { - AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find aws-lc symbols linked"); diff --git a/contrib/restricted/aws/aws-c-cal/source/cal.c b/contrib/restricted/aws/aws-c-cal/source/cal.c index 13477c8dd3b..5b0f8ba1a69 100644 --- a/contrib/restricted/aws/aws-c-cal/source/cal.c +++ b/contrib/restricted/aws/aws-c-cal/source/cal.c @@ -19,7 +19,7 @@ static struct aws_error_info s_errors[] = { "sign a message with a public key."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, - "A key length was used for an algorithm that needs a different key length"), + "A key length was used for an algorithm that needs a different key length."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER, "An ASN.1 OID was encountered that wasn't expected or understood. Most likely, an unsupported algorithm was " @@ -29,10 +29,10 @@ static struct aws_error_info s_errors[] = { "An ASN.1 DER decoding operation failed on malformed input."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_MISMATCHED_DER_TYPE, - "An invalid DER type was requested during encoding/decoding"), + "An invalid DER type was requested during encoding/decoding."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM, - "The specified algorithim is unsupported on this platform."), + "The specified algorithm is unsupported on this platform."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, "The input passed to a cipher algorithm was too large for that algorithm. 
Consider breaking the input into " @@ -40,7 +40,13 @@ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, "A cipher material such as an initialization vector or tag was an incorrect size for the selected algorithm."), -}; + AWS_DEFINE_ERROR_INFO_CAL( + AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT, + "DER decoder does support negative integers."), + AWS_DEFINE_ERROR_INFO_CAL(AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT, "Key format is not supported."), + AWS_DEFINE_ERROR_INFO_CAL( + AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED, + "Unknown error when calling underlying Crypto library.")}; static struct aws_error_info_list s_list = { .error_list = s_errors, @@ -60,6 +66,7 @@ static struct aws_log_subject_info s_cal_log_subject_infos[] = { AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto_resolve", "Subject for libcrypto symbol resolution logging."), + DEFINE_LOG_SUBJECT_INFO(AWS_LS_CAL_RSA, "rsa", "Subject for rsa cryptography specific logging."), }; static struct aws_log_subject_info_list s_cal_log_subject_list = { @@ -70,6 +77,7 @@ static struct aws_log_subject_info_list s_cal_log_subject_list = { #ifndef BYO_CRYPTO extern void aws_cal_platform_init(struct aws_allocator *allocator); extern void aws_cal_platform_clean_up(void); +extern void aws_cal_platform_thread_clean_up(void); #endif /* BYO_CRYPTO */ static bool s_cal_library_initialized = false; @@ -96,3 +104,9 @@ void aws_cal_library_clean_up(void) { aws_common_library_clean_up(); } } + +void aws_cal_thread_clean_up(void) { +#ifndef BYO_CRYPTO + aws_cal_platform_thread_clean_up(); +#endif /* BYO_CRYPTO */ +} diff --git a/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_aes.c b/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_aes.c index 8656d54d071..64dbfd43bc1 100644 --- a/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_aes.c +++ b/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_aes.c @@ -9,11 +9,18 @@ #include 
<CommonCrypto/CommonHMAC.h> #include <CommonCrypto/CommonSymmetricKeywrap.h> -#include "common_cryptor_spi.h" - -#if (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 /* macOS 10.13 */)) || \ - (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && (__IPHONE_OS_VERSION_MAX_ALLOWED >= 110000 /* iOS v11 */)) -# define USE_LATEST_CRYPTO_API 1 +#if !defined(AWS_APPSTORE_SAFE) +/* CommonCrypto does not offer public APIs for doing AES GCM. + * There are private APIs for doing it (CommonCryptoSPI.h), but App Store + * submissions that reference these private symbols will be rejected. */ + +# define SUPPORT_AES_GCM_VIA_SPI 1 +# include "common_cryptor_spi.h" + +# if (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 /* macOS 10.13 */)) || \ + (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && (__IPHONE_OS_VERSION_MAX_ALLOWED >= 110000 /* iOS v11 */)) +# define USE_LATEST_CRYPTO_API 1 +# endif #endif struct cc_aes_cipher { @@ -353,43 +360,45 @@ struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( return &cc_cipher->cipher_base; } +#ifdef SUPPORT_AES_GCM_VIA_SPI + /* * Note that CCCryptorGCMFinal is deprecated in Mac 10.13. It also doesn't compare the tag with expected tag * https://opensource.apple.com/source/CommonCrypto/CommonCrypto-60118.1.1/include/CommonCryptorSPI.h.auto.html */ static CCStatus s_cc_crypto_gcm_finalize(struct _CCCryptor *encryptor_handle, uint8_t *buffer, size_t tag_length) { -#ifdef USE_LATEST_CRYPTO_API +# ifdef USE_LATEST_CRYPTO_API if (__builtin_available(macOS 10.13, iOS 11.0, *)) { return CCCryptorGCMFinalize(encryptor_handle, buffer, tag_length); } else { /* We would never hit this branch for newer macOS and iOS versions because of the __builtin_available check, so we can * suppress the compiler warning. 
*/ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" return CCCryptorGCMFinal(encryptor_handle, buffer, &tag_length); -# pragma clang diagnostic pop +# pragma clang diagnostic pop } -#else +# else return CCCryptorGCMFinal(encryptor_handle, buffer, &tag_length); -#endif +# endif } static CCCryptorStatus s_cc_cryptor_gcm_set_iv(struct _CCCryptor *encryptor_handle, uint8_t *buffer, size_t length) { -#ifdef USE_LATEST_CRYPTO_API +# ifdef USE_LATEST_CRYPTO_API if (__builtin_available(macOS 10.13, iOS 11.0, *)) { return CCCryptorGCMSetIV(encryptor_handle, buffer, length); } else { /* We would never hit this branch for newer macOS and iOS versions because of the __builtin_available check, so we can * suppress the compiler warning. */ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" return CCCryptorGCMAddIV(encryptor_handle, buffer, length); -# pragma clang diagnostic pop +# pragma clang diagnostic pop } -#else +# else return CCCryptorGCMAddIV(encryptor_handle, buffer, length); -#endif +# endif } static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { @@ -581,6 +590,26 @@ struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( return &cc_cipher->cipher_base; } +#else /* !SUPPORT_AES_GCM_VIA_SPI */ + +struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *tag) { + + (void)allocator; + (void)key; + (void)iv; + (void)aad; + (void)tag; + aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + return NULL; +} + +#endif /* SUPPORT_AES_GCM_VIA_SPI */ + static int s_keywrap_encrypt_decrypt( 
struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, diff --git a/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_platform_init.c b/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_platform_init.c index decedcdafa2..f2da2805673 100644 --- a/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_platform_init.c +++ b/contrib/restricted/aws/aws-c-cal/source/darwin/commoncrypto_platform_init.c @@ -10,3 +10,5 @@ void aws_cal_platform_init(struct aws_allocator *allocator) { } void aws_cal_platform_clean_up(void) {} + +void aws_cal_platform_thread_clean_up(void) {} diff --git a/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_ecc.c b/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_ecc.c index de313f08f2a..646484449f1 100644 --- a/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_ecc.c +++ b/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_ecc.c @@ -7,13 +7,10 @@ #include <aws/cal/cal.h> #include <aws/cal/private/der.h> +#include <CoreFoundation/CoreFoundation.h> #include <Security/SecKey.h> #include <Security/Security.h> -#if !defined(AWS_OS_IOS) -# include <Security/SecSignVerifyTransform.h> -#endif - struct commoncrypto_ecc_key_pair { struct aws_ecc_key_pair key_pair; SecKeyRef priv_key_ref; @@ -25,6 +22,29 @@ static uint8_t s_preamble = 0x04; static size_t s_der_overhead = 8; +/* The hard-coded "valid" public keys. Copy/pated from one of our unit test. 
*/ +const static uint8_t s_fake_x_ecdsa_p256[] = { + 0xd0, 0x72, 0x0d, 0xc6, 0x91, 0xaa, 0x80, 0x09, 0x6b, 0xa3, 0x2f, 0xed, 0x1c, 0xb9, 0x7c, 0x2b, + 0x62, 0x06, 0x90, 0xd0, 0x6d, 0xe0, 0x31, 0x7b, 0x86, 0x18, 0xd5, 0xce, 0x65, 0xeb, 0x72, 0x8f, +}; + +const static uint8_t s_fake_y_ecdsa_p256[] = { + 0x96, 0x81, 0xb5, 0x17, 0xb1, 0xcd, 0xa1, 0x7d, 0x0d, 0x83, 0xd3, 0x35, 0xd9, 0xc4, 0xa8, 0xa9, + 0xa9, 0xb0, 0xb1, 0xb3, 0xc7, 0x10, 0x6d, 0x8f, 0x3c, 0x72, 0xbc, 0x50, 0x93, 0xdc, 0x27, 0x5f, +}; + +const static uint8_t s_fake_x_ecdsa_p384[] = { + 0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67, + 0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64, + 0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24, +}; + +const static uint8_t s_fake_y_ecdsa_p384[] = { + 0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69, + 0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19, + 0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0, +}; + static int s_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, @@ -173,15 +193,13 @@ static struct commoncrypto_ecc_key_pair *s_alloc_pair_and_init_buffers( goto error; } - memset(cc_key_pair->key_pair.key_buf.buffer, 0, cc_key_pair->key_pair.key_buf.len); - aws_byte_buf_write_u8(&cc_key_pair->key_pair.key_buf, s_preamble); if (pub_x.ptr && pub_y.ptr) { aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_x); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_y); } else { - cc_key_pair->key_pair.key_buf.len += s_key_coordinate_size * 2; + aws_byte_buf_write_u8_n(&cc_key_pair->key_pair.key_buf, 0x0, s_key_coordinate_size * 2); } if (priv_key.ptr) { @@ -213,10 +231,40 @@ struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( 
enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key) { - struct aws_byte_cursor empty_cur; - AWS_ZERO_STRUCT(empty_cur); + /** + * We use SecCreateKeyWithData to create ECC key. Expected format for the key passed to that api is a byte buffer + * consisting of "0x04 | x | y | p", where x,y is public pair and p is private key. + * + * In this case we only have private key and we need to construct SecKey from that. + * + * We used to just pass 0,0 point for x,y, i.e. "0x04 | 0 | 0 | p". + * + * This used to work on Macs before 14, but in 14+ SecCreateKeyWithData returns error, + * which is reasonable since 0,0 is not a valid public point. + * + * To get around the issue, we use a fake public key, which is a valid public point, but not matching the private + * key as a quick workaround. + */ + struct aws_byte_cursor fake_pub_x; + AWS_ZERO_STRUCT(fake_pub_x); + struct aws_byte_cursor fake_pub_y; + AWS_ZERO_STRUCT(fake_pub_y); + switch (curve_name) { + case AWS_CAL_ECDSA_P256: + fake_pub_x = aws_byte_cursor_from_array(s_fake_x_ecdsa_p256, AWS_ARRAY_SIZE(s_fake_x_ecdsa_p256)); + fake_pub_y = aws_byte_cursor_from_array(s_fake_y_ecdsa_p256, AWS_ARRAY_SIZE(s_fake_y_ecdsa_p256)); + break; + case AWS_CAL_ECDSA_P384: + fake_pub_x = aws_byte_cursor_from_array(s_fake_x_ecdsa_p384, AWS_ARRAY_SIZE(s_fake_x_ecdsa_p384)); + fake_pub_y = aws_byte_cursor_from_array(s_fake_y_ecdsa_p384, AWS_ARRAY_SIZE(s_fake_y_ecdsa_p384)); + break; + default: + aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + return NULL; + } + struct commoncrypto_ecc_key_pair *cc_key_pair = - s_alloc_pair_and_init_buffers(allocator, curve_name, empty_cur, empty_cur, *priv_key); + s_alloc_pair_and_init_buffers(allocator, curve_name, fake_pub_x, fake_pub_y, *priv_key); if (!cc_key_pair) { return NULL; @@ -254,6 +302,10 @@ struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( goto error; } + /* Zero out the fake public keys in the key pair */ + 
aws_byte_buf_secure_zero(&cc_key_pair->key_pair.pub_x); + aws_byte_buf_secure_zero(&cc_key_pair->key_pair.pub_y); + CFRelease(key_attributes); CFRelease(private_key_data); @@ -336,7 +388,7 @@ error: return NULL; } -#if !defined(AWS_OS_IOS) +#if defined(AWS_OS_MACOS) struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name) { @@ -443,7 +495,6 @@ struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( goto error; } - memset(cc_key_pair->key_pair.key_buf.buffer, 0, cc_key_pair->key_pair.key_buf.len); aws_byte_buf_write_u8(&cc_key_pair->key_pair.key_buf, s_preamble); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_x); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_y); @@ -487,7 +538,7 @@ error: s_destroy_key(&cc_key_pair->key_pair); return NULL; } -#endif /* AWS_OS_IOS */ +#endif /* AWS_OS_MACOS */ struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, diff --git a/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_rsa.c b/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_rsa.c new file mode 100644 index 00000000000..c9c02ec981c --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/source/darwin/securityframework_rsa.c @@ -0,0 +1,491 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/cal/private/rsa.h> + +#include <aws/cal/cal.h> +#include <aws/common/encoding.h> + +#include <Security/SecKey.h> +#include <Security/Security.h> + +struct sec_rsa_key_pair { + struct aws_rsa_key_pair base; + CFAllocatorRef cf_allocator; + SecKeyRef priv_key_ref; + SecKeyRef pub_key_ref; +}; + +static void s_rsa_destroy_key(void *key_pair) { + if (key_pair == NULL) { + return; + } + + struct aws_rsa_key_pair *base = key_pair; + struct sec_rsa_key_pair *impl = base->impl; + + if (impl->pub_key_ref) { + CFRelease(impl->pub_key_ref); + } + + if (impl->priv_key_ref) { + CFRelease(impl->priv_key_ref); + } + + if (impl->cf_allocator) { + aws_wrapped_cf_allocator_destroy(impl->cf_allocator); + } + + aws_rsa_key_pair_base_clean_up(base); + + aws_mem_release(base->allocator, impl); +} + +/* + * Transforms security error code into crt error code and raises it as necessary. + * Docs on what security apis can throw are fairly sparse and so far in testing + * it only threw generic -50 error. So just log for now and we can add additional + * error translation later. + */ +static int s_reinterpret_sec_error_as_crt(CFErrorRef error, const char *function_name) { + if (error == NULL) { + return AWS_OP_SUCCESS; + } + + CFIndex error_code = CFErrorGetCode(error); + CFStringRef error_message = CFErrorCopyDescription(error); /* This function never returns NULL */ + + /* + * Note: CFStringGetCStringPtr returns NULL quite often. + * Refer to writeup at the start of CFString.h as to why. + * To reliably get an error message we need to use the following function + * that will copy error string into our buffer. + */ + const char *error_cstr = NULL; + char buffer[128]; + if (CFStringGetCString(error_message, buffer, 128, kCFStringEncodingUTF8)) { + error_cstr = buffer; + } + + int crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED; + + /* + * Mac seems throws errSecVerifyFailed for any signature verification + * failures (based on testing and not review of their code). 
+ * Which makes it impossible to distinguish between signature validation + * failure and api call failure. + * So let errSecVerifyFailed as signature validation failure, rather than a + * more generic Crypto Failure as it seems more intuitive to caller that + * signature cannot be verified, rather than something wrong with crypto (and + * in most cases crypto is working correctly, but returning non-specific error). + */ + if (error_code == errSecVerifyFailed) { + crt_error = AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED; + } + + AWS_LOGF_ERROR( + AWS_LS_CAL_RSA, + "%s() failed. CFError:%ld(%s) aws_error:%s", + function_name, + error_code, + error_cstr ? error_cstr : "", + aws_error_name(crt_error)); + + CFRelease(error_message); + + return aws_raise_error(crt_error); +} + +/* + * Maps crt encryption algo enum to Security Framework equivalent. + * Fails with AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM if mapping cannot be done for + * some reason. + * Mapped value is passed back through out variable. + */ +static int s_map_rsa_encryption_algo_to_sec(enum aws_rsa_encryption_algorithm algorithm, SecKeyAlgorithm *out) { + + switch (algorithm) { + case AWS_CAL_RSA_ENCRYPTION_PKCS1_5: + *out = kSecKeyAlgorithmRSAEncryptionPKCS1; + return AWS_OP_SUCCESS; + case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256: + *out = kSecKeyAlgorithmRSAEncryptionOAEPSHA256; + return AWS_OP_SUCCESS; + case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512: + *out = kSecKeyAlgorithmRSAEncryptionOAEPSHA512; + return AWS_OP_SUCCESS; + } + + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); +} + +/* + * Maps crt encryption algo enum to Security Framework equivalent. + * Fails with AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM if mapping cannot be done for + * some reason. + * Mapped value is passed back through out variable. 
+ */ +static int s_map_rsa_signing_algo_to_sec(enum aws_rsa_signature_algorithm algorithm, SecKeyAlgorithm *out) { + + switch (algorithm) { + case AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256: + *out = kSecKeyAlgorithmRSASignatureDigestPKCS1v15SHA256; + return AWS_OP_SUCCESS; + case AWS_CAL_RSA_SIGNATURE_PSS_SHA256: +#if (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 /* macOS 10.13 */)) || \ + (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && (__IPHONE_OS_VERSION_MAX_ALLOWED >= 110000 /* iOS v11 */)) || \ + (defined(__TV_OS_VERSION_MAX_ALLOWED) && (__TV_OS_VERSION_MAX_ALLOWED >= 110000 /* tvos v11 */)) || \ + (defined(__WATCH_OS_VERSION_MAX_ALLOWED) && (__WATCH_OS_VERSION_MAX_ALLOWED >= 40000 /* watchos v4 */)) + if (__builtin_available(macos 10.13, ios 11.0, tvos 11.0, watchos 4.0, *)) { + *out = kSecKeyAlgorithmRSASignatureDigestPSSSHA256; + return AWS_OP_SUCCESS; + } else { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } +#else + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); +#endif + } + + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); +} + +static int s_rsa_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out) { + struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (key_pair_impl->pub_key_ref == NULL) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Public Key required for encrypt operation."); + return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); + } + + SecKeyAlgorithm alg; + if (s_map_rsa_encryption_algo_to_sec(algorithm, &alg)) { + return AWS_OP_ERR; + } + + if (!SecKeyIsAlgorithmSupported(key_pair_impl->pub_key_ref, kSecKeyOperationTypeEncrypt, alg)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + CFDataRef plaintext_ref = + 
CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, plaintext.ptr, plaintext.len, kCFAllocatorNull); + AWS_FATAL_ASSERT(plaintext_ref); + + CFErrorRef error = NULL; + CFDataRef ciphertext_ref = SecKeyCreateEncryptedData(key_pair_impl->pub_key_ref, alg, plaintext_ref, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateEncryptedData")) { + CFRelease(error); + goto on_error; + } + + struct aws_byte_cursor ciphertext_cur = + aws_byte_cursor_from_array(CFDataGetBytePtr(ciphertext_ref), CFDataGetLength(ciphertext_ref)); + + if (aws_byte_buf_append(out, &ciphertext_cur)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + CFRelease(plaintext_ref); + CFRelease(ciphertext_ref); + return AWS_OP_SUCCESS; + +on_error: + if (plaintext_ref != NULL) { + CFRelease(plaintext_ref); + } + + if (ciphertext_ref != NULL) { + CFRelease(ciphertext_ref); + } + + return AWS_OP_ERR; +} + +static int s_rsa_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out) { + struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (key_pair_impl->priv_key_ref == NULL) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Private Key required for encrypt operation."); + return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); + } + + SecKeyAlgorithm alg; + if (s_map_rsa_encryption_algo_to_sec(algorithm, &alg)) { + return AWS_OP_ERR; + } + + if (!SecKeyIsAlgorithmSupported(key_pair_impl->priv_key_ref, kSecKeyOperationTypeDecrypt, alg)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + CFDataRef ciphertext_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, ciphertext.ptr, ciphertext.len, kCFAllocatorNull); + AWS_FATAL_ASSERT(ciphertext_ref); + + CFErrorRef error = NULL; + CFDataRef plaintext_ref = 
SecKeyCreateDecryptedData(key_pair_impl->priv_key_ref, alg, ciphertext_ref, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateDecryptedData")) { + CFRelease(error); + goto on_error; + } + + struct aws_byte_cursor plaintext_cur = + aws_byte_cursor_from_array(CFDataGetBytePtr(plaintext_ref), CFDataGetLength(plaintext_ref)); + + if (aws_byte_buf_append(out, &plaintext_cur)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + CFRelease(plaintext_ref); + CFRelease(ciphertext_ref); + return AWS_OP_SUCCESS; + +on_error: + if (plaintext_ref != NULL) { + CFRelease(plaintext_ref); + } + + if (ciphertext_ref != NULL) { + CFRelease(ciphertext_ref); + } + + return AWS_OP_ERR; +} + +static int s_rsa_sign( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out) { + struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (key_pair_impl->priv_key_ref == NULL) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Private Key required for sign operation."); + return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); + } + + SecKeyAlgorithm alg; + if (s_map_rsa_signing_algo_to_sec(algorithm, &alg)) { + return AWS_OP_ERR; + } + + if (!SecKeyIsAlgorithmSupported(key_pair_impl->priv_key_ref, kSecKeyOperationTypeSign, alg)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + CFDataRef digest_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, digest.ptr, digest.len, kCFAllocatorNull); + AWS_FATAL_ASSERT(digest_ref); + + CFErrorRef error = NULL; + CFDataRef signature_ref = SecKeyCreateSignature(key_pair_impl->priv_key_ref, alg, digest_ref, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateSignature")) { + CFRelease(error); + goto on_error; + } + + struct aws_byte_cursor signature_cur = + 
aws_byte_cursor_from_array(CFDataGetBytePtr(signature_ref), CFDataGetLength(signature_ref)); + + if (aws_byte_buf_append(out, &signature_cur)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + CFRelease(digest_ref); + CFRelease(signature_ref); + + return AWS_OP_SUCCESS; + +on_error: + CFRelease(digest_ref); + + if (signature_ref != NULL) { + CFRelease(signature_ref); + } + + return AWS_OP_ERR; +} + +static int s_rsa_verify( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature) { + struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (key_pair_impl->pub_key_ref == NULL) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Public Key required for verify operation."); + return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); + } + + SecKeyAlgorithm alg; + if (s_map_rsa_signing_algo_to_sec(algorithm, &alg)) { + return AWS_OP_ERR; + } + + if (!SecKeyIsAlgorithmSupported(key_pair_impl->pub_key_ref, kSecKeyOperationTypeVerify, alg)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + CFDataRef digest_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, digest.ptr, digest.len, kCFAllocatorNull); + CFDataRef signature_ref = + CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, signature.ptr, signature.len, kCFAllocatorNull); + AWS_FATAL_ASSERT(digest_ref && signature_ref); + + CFErrorRef error = NULL; + Boolean result = SecKeyVerifySignature(key_pair_impl->pub_key_ref, alg, digest_ref, signature_ref, &error); + + CFRelease(digest_ref); + CFRelease(signature_ref); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyVerifySignature")) { + CFRelease(error); + return AWS_OP_ERR; + } + + return result ? 
AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); +} + +static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { + .encrypt = s_rsa_encrypt, + .decrypt = s_rsa_decrypt, + .sign = s_rsa_sign, + .verify = s_rsa_verify, +}; + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + struct sec_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct sec_rsa_key_pair)); + + CFMutableDictionaryRef key_attributes = NULL; + CFDataRef private_key_data = NULL; + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + key_pair_impl->cf_allocator = aws_wrapped_cf_allocator_new(allocator); + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); + + private_key_data = CFDataCreate(key_pair_impl->cf_allocator, key.ptr, key.len); + AWS_FATAL_ASSERT(private_key_data); + + key_attributes = CFDictionaryCreateMutable(key_pair_impl->cf_allocator, 0, NULL, NULL); + AWS_FATAL_ASSERT(key_attributes); + + CFDictionaryAddValue(key_attributes, kSecClass, kSecClassKey); + CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeRSA); + CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPrivate); + + CFErrorRef error = NULL; + key_pair_impl->priv_key_ref = SecKeyCreateWithData(private_key_data, key_attributes, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateWithData")) { + CFRelease(error); + goto on_error; + } + + key_pair_impl->pub_key_ref = SecKeyCopyPublicKey(key_pair_impl->priv_key_ref); + AWS_FATAL_ASSERT(key_pair_impl->pub_key_ref); + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + size_t block_size = SecKeyGetBlockSize(key_pair_impl->priv_key_ref); + + if (block_size < (AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS / 8) || + block_size > 
(AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS / 8)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unsupported key size: %zu", block_size); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + goto on_error; + } + + key_pair_impl->base.key_size_in_bits = block_size * 8; + + CFRelease(key_attributes); + CFRelease(private_key_data); + + return &key_pair_impl->base; + +on_error: + if (private_key_data) { + CFRelease(private_key_data); + } + + if (key_attributes) { + CFRelease(key_attributes); + } + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + struct sec_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct sec_rsa_key_pair)); + + CFMutableDictionaryRef key_attributes = NULL; + CFDataRef public_key_data = NULL; + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + key_pair_impl->cf_allocator = aws_wrapped_cf_allocator_new(allocator); + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); + + public_key_data = CFDataCreate(key_pair_impl->cf_allocator, key.ptr, key.len); + AWS_FATAL_ASSERT(public_key_data); + + key_attributes = CFDictionaryCreateMutable(key_pair_impl->cf_allocator, 0, NULL, NULL); + AWS_FATAL_ASSERT(key_attributes); + + CFDictionaryAddValue(key_attributes, kSecClass, kSecClassKey); + CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeRSA); + CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPublic); + + CFErrorRef error = NULL; + key_pair_impl->pub_key_ref = SecKeyCreateWithData(public_key_data, key_attributes, &error); + if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateWithData")) { + CFRelease(error); + goto on_error; + } + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + size_t 
block_size = SecKeyGetBlockSize(key_pair_impl->pub_key_ref); + if (block_size < (AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS / 8) || + block_size > (AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS / 8)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unsupported key size: %zu", block_size); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + goto on_error; + } + key_pair_impl->base.key_size_in_bits = block_size * 8; + + CFRelease(key_attributes); + CFRelease(public_key_data); + + return &key_pair_impl->base; + +on_error: + if (public_key_data) { + CFRelease(public_key_data); + } + + if (key_attributes) { + CFRelease(key_attributes); + } + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} diff --git a/contrib/restricted/aws/aws-c-cal/source/der.c b/contrib/restricted/aws/aws-c-cal/source/der.c index 546a5685b5e..15fbcd7c1ea 100644 --- a/contrib/restricted/aws/aws-c-cal/source/der.c +++ b/contrib/restricted/aws/aws-c-cal/source/der.c @@ -36,11 +36,23 @@ struct der_tlv { uint8_t *value; }; -static void s_decode_tlv(struct der_tlv *tlv) { +static int s_decode_tlv(struct der_tlv *tlv) { if (tlv->tag == AWS_DER_INTEGER) { + if (tlv->length == 0) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + uint8_t first_byte = tlv->value[0]; - /* if the first byte is 0, it just denotes unsigned and should be removed */ - if (first_byte == 0x00) { + if (first_byte & 0x80) { + return aws_raise_error(AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT); + } + + /* if its multibyte int and first byte is 0, strip it since it was added + * to indicate to der that it is positive number. + * if len is 1 and first byte is 0, then the number is just zero, so + * leave it as is. 
+ */ + if (tlv->length > 1 && first_byte == 0x00) { tlv->length -= 1; tlv->value += 1; } @@ -49,6 +61,8 @@ static void s_decode_tlv(struct der_tlv *tlv) { tlv->length -= 1; tlv->value += 1; } + + return AWS_OP_SUCCESS; } static int s_der_read_tlv(struct aws_byte_cursor *cur, struct der_tlv *tlv) { @@ -56,10 +70,10 @@ static int s_der_read_tlv(struct aws_byte_cursor *cur, struct der_tlv *tlv) { uint8_t len_bytes = 0; uint32_t len = 0; if (!aws_byte_cursor_read_u8(cur, &tag)) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_byte_cursor_read_u8(cur, &len_bytes)) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } /* if the sign bit is set, then the first byte is the number of bytes required to store * the length */ @@ -88,10 +102,16 @@ static int s_der_read_tlv(struct aws_byte_cursor *cur, struct der_tlv *tlv) { len = len_bytes; } + if (len > cur->len) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + tlv->tag = tag; tlv->length = len; tlv->value = (tag == AWS_DER_NULL) ? 
NULL : cur->ptr; - s_decode_tlv(tlv); + if (s_decode_tlv(tlv)) { + return AWS_OP_ERR; + } aws_byte_cursor_advance(cur, len); return AWS_OP_SUCCESS; @@ -222,7 +242,7 @@ void aws_der_encoder_destroy(struct aws_der_encoder *encoder) { aws_mem_release(encoder->allocator, encoder); } -int aws_der_encoder_write_integer(struct aws_der_encoder *encoder, struct aws_byte_cursor integer) { +int aws_der_encoder_write_unsigned_integer(struct aws_der_encoder *encoder, struct aws_byte_cursor integer) { AWS_FATAL_ASSERT(integer.len <= UINT32_MAX); struct der_tlv tlv = { .tag = AWS_DER_INTEGER, @@ -391,12 +411,13 @@ int s_parse_cursor(struct aws_der_decoder *decoder, struct aws_byte_cursor cur) while (cur.len) { struct der_tlv tlv = {0}; if (s_der_read_tlv(&cur, &tlv)) { - return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + return AWS_OP_ERR; } /* skip trailing newlines in the stream after any TLV */ while (cur.len && *cur.ptr == '\n') { aws_byte_cursor_advance(&cur, 1); } + if (aws_array_list_push_back(&decoder->tlvs, &tlv)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } @@ -472,7 +493,7 @@ int aws_der_decoder_tlv_string(struct aws_der_decoder *decoder, struct aws_byte_ return AWS_OP_SUCCESS; } -int aws_der_decoder_tlv_integer(struct aws_der_decoder *decoder, struct aws_byte_cursor *integer) { +int aws_der_decoder_tlv_unsigned_integer(struct aws_der_decoder *decoder, struct aws_byte_cursor *integer) { struct der_tlv tlv = s_decoder_tlv(decoder); if (tlv.tag != AWS_DER_INTEGER) { return aws_raise_error(AWS_ERROR_CAL_MISMATCHED_DER_TYPE); diff --git a/contrib/restricted/aws/aws-c-cal/source/hash.c b/contrib/restricted/aws/aws-c-cal/source/hash.c index 37891277323..f6fbd3af593 100644 --- a/contrib/restricted/aws/aws-c-cal/source/hash.c +++ b/contrib/restricted/aws/aws-c-cal/source/hash.c @@ -87,7 +87,7 @@ static inline int compute_hash( struct aws_byte_buf *output, size_t truncate_to) { if (!hash) { - return AWS_OP_ERR; + return 
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (aws_hash_update(hash, input)) { diff --git a/contrib/restricted/aws/aws-c-cal/source/rsa.c b/contrib/restricted/aws/aws-c-cal/source/rsa.c new file mode 100644 index 00000000000..f24107176f6 --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/source/rsa.c @@ -0,0 +1,282 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include <aws/cal/private/rsa.h> + +#include <aws/cal/cal.h> +#include <aws/cal/hash.h> +#include <aws/cal/private/der.h> + +typedef struct aws_rsa_key_pair *( + aws_rsa_key_pair_new_from_public_pkcs1_fn)(struct aws_allocator *allocator, struct aws_byte_cursor public_key); + +typedef struct aws_rsa_key_pair *( + aws_rsa_key_pair_new_from_private_pkcs1_fn)(struct aws_allocator *allocator, struct aws_byte_cursor private_key); + +#ifndef BYO_CRYPTO + +extern struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor public_key); + +extern struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor private_key); + +#else /* BYO_CRYPTO */ + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor public_key) { + (void)allocator; + (void)public_key; + abort(); +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor private_key) { + (void)allocator; + (void)private_key; + abort(); +} +#endif /* BYO_CRYPTO */ + +static aws_rsa_key_pair_new_from_public_pkcs1_fn *s_rsa_key_pair_new_from_public_key_pkcs1_fn = + aws_rsa_key_pair_new_from_public_key_pkcs1_impl; + +static aws_rsa_key_pair_new_from_private_pkcs1_fn *s_rsa_key_pair_new_from_private_key_pkcs1_fn = + aws_rsa_key_pair_new_from_private_key_pkcs1_impl; + +struct 
aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( + struct aws_allocator *allocator, + struct aws_byte_cursor public_key) { + return s_rsa_key_pair_new_from_public_key_pkcs1_fn(allocator, public_key); +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( + struct aws_allocator *allocator, + struct aws_byte_cursor private_key) { + return s_rsa_key_pair_new_from_private_key_pkcs1_fn(allocator, private_key); +} + +void aws_rsa_key_pair_base_clean_up(struct aws_rsa_key_pair *key_pair) { + aws_byte_buf_clean_up_secure(&key_pair->priv); + aws_byte_buf_clean_up_secure(&key_pair->pub); +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair) { + return aws_ref_count_acquire(&key_pair->ref_count); +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair) { + if (key_pair != NULL) { + aws_ref_count_release(&key_pair->ref_count); + } + return NULL; +} + +size_t aws_rsa_key_pair_max_encrypt_plaintext_size( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm) { + /* + * Per rfc8017, max size of plaintext for encrypt operation is as follows: + * PKCS1-v1_5: (key size in bytes) - 11 + * OAEP: (key size in bytes) - 2 * (hash bytes) - 2 + */ + + size_t key_size_in_bytes = key_pair->key_size_in_bits / 8; + switch (algorithm) { + case AWS_CAL_RSA_ENCRYPTION_PKCS1_5: + return key_size_in_bytes - 11; + case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256: + return key_size_in_bytes - 2 * (256 / 8) - 2; + case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512: + return key_size_in_bytes - 2 * (512 / 8) - 2; + default: + AWS_FATAL_ASSERT("Unsupported RSA Encryption Algorithm"); + } + + return 0; +} + +int aws_rsa_key_pair_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out) { + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + if 
(AWS_UNLIKELY(aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) < plaintext.len)) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unexpected buffer size. For RSA, ciphertext must not exceed block size"); + return aws_raise_error(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM); + } + + return key_pair->vtable->encrypt(key_pair, algorithm, plaintext, out); +} + +AWS_CAL_API int aws_rsa_key_pair_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out) { + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + if (AWS_UNLIKELY(ciphertext.len != (key_pair->key_size_in_bits / 8))) { + AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unexpected buffer size. For RSA, ciphertext is expected to match block size."); + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return key_pair->vtable->decrypt(key_pair, algorithm, ciphertext, out); +} + +int aws_rsa_key_pair_sign_message( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out) { + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + AWS_FATAL_ASSERT( + algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 || algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256); + + if (digest.len > AWS_SHA256_LEN) { + AWS_LOGF_ERROR( + AWS_LS_CAL_RSA, "Unexpected digest size. 
For RSA, digest length is bound by max size of hash function"); + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return key_pair->vtable->sign(key_pair, algorithm, digest, out); +} + +int aws_rsa_key_pair_verify_signature( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature) { + AWS_PRECONDITION(key_pair); + + return key_pair->vtable->verify(key_pair, algorithm, digest, signature); +} + +size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair) { + AWS_PRECONDITION(key_pair); + return key_pair->key_size_in_bits / 8; +} + +size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair) { + AWS_PRECONDITION(key_pair); + return key_pair->key_size_in_bits / 8; +} + +int aws_rsa_key_pair_get_public_key( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_key_export_format format, + struct aws_byte_buf *out) { + (void)format; /* ignore format for now, since only pkcs1 is supported. */ + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + if (key_pair->pub.len == 0) { + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + } + + aws_byte_buf_init_copy(out, key_pair->allocator, &key_pair->pub); + return AWS_OP_SUCCESS; +} + +int aws_rsa_key_pair_get_private_key( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_key_export_format format, + struct aws_byte_buf *out) { + (void)format; /* ignore format for now, since only pkcs1 is supported. 
*/ + AWS_PRECONDITION(key_pair); + AWS_PRECONDITION(out); + + if (key_pair->priv.len == 0) { + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + } + + aws_byte_buf_init_copy(out, key_pair->allocator, &key_pair->priv); + return AWS_OP_SUCCESS; +} + +int aws_der_decoder_load_private_rsa_pkcs1(struct aws_der_decoder *decoder, struct aws_rsa_private_key_pkcs1 *out) { + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_SEQUENCE) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + struct aws_byte_cursor version_cur; + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &version_cur)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (version_cur.len != 1 || version_cur.ptr[0] != 0) { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT); + } + out->version = 0; + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &(out->modulus))) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->publicExponent)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->privateExponent)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->prime1)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->prime2)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->exponent1)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + 
if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->exponent2)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->coefficient)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + return AWS_OP_SUCCESS; +} + +int aws_der_decoder_load_public_rsa_pkcs1(struct aws_der_decoder *decoder, struct aws_rsa_public_key_pkcs1 *out) { + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_SEQUENCE) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &(out->modulus))) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->publicExponent)) { + return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); + } + + return AWS_OP_SUCCESS; +} + +int is_valid_rsa_key_size(size_t key_size_in_bits) { + if (key_size_in_bits < AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS || + key_size_in_bits > AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS || key_size_in_bits % 8 != 0) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return AWS_OP_SUCCESS; +} diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c index 78ba7a9ee86..2c6c796af82 100644 --- a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c +++ b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_aes.c @@ -4,6 +4,7 @@ */ #include <aws/cal/private/symmetric_cipher_priv.h> +#define OPENSSL_SUPPRESS_DEPRECATED #include <openssl/evp.h> struct openssl_aes_cipher { diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c 
index 9a1d43e3d58..60c26af9dd6 100644 --- a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c +++ b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_platform_init.c @@ -13,6 +13,15 @@ #include <aws/cal/private/opensslcrypto_common.h> +/* + * OpenSSL 3 has a large amount of interface changes and many of the functions used + * throughout aws-c-cal have become deprecated. + * Lets disable deprecation warnings, so that we can atleast run CI, until we + * can move over to new functions. + */ +#define OPENSSL_SUPPRESS_DEPRECATED +#include <openssl/crypto.h> + static struct openssl_hmac_ctx_table hmac_ctx_table; static struct openssl_evp_md_ctx_table evp_md_ctx_table; @@ -21,23 +30,35 @@ struct openssl_evp_md_ctx_table *g_aws_openssl_evp_md_ctx_table = NULL; static struct aws_allocator *s_libcrypto_allocator = NULL; +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) +# define OPENSSL_IS_OPENSSL +#endif + /* weak refs to libcrypto functions to force them to at least try to link * and avoid dead-stripping */ -#if defined(OPENSSL_IS_AWSLC) +#if defined(OPENSSL_IS_AWSLC) || defined(OPENSSL_IS_BORINGSSL) extern HMAC_CTX *HMAC_CTX_new(void) __attribute__((weak, used)); extern void HMAC_CTX_free(HMAC_CTX *) __attribute__((weak, used)); -extern void HMAC_CTX_reset(HMAC_CTX *) __attribute__((weak, used)); extern void HMAC_CTX_init(HMAC_CTX *) __attribute__((weak, used)); extern void HMAC_CTX_cleanup(HMAC_CTX *) __attribute__((weak, used)); extern int HMAC_Update(HMAC_CTX *, const unsigned char *, size_t) __attribute__((weak, used)); extern int HMAC_Final(HMAC_CTX *, unsigned char *, unsigned int *) __attribute__((weak, used)); extern int HMAC_Init_ex(HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *) __attribute__((weak, used)); + +static int s_hmac_init_ex_bssl(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl) { + AWS_PRECONDITION(ctx); + + int (*init_ex_pt)(HMAC_CTX *, const void *, size_t, const EVP_MD 
*, ENGINE *) = (int (*)( + HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *))g_aws_openssl_hmac_ctx_table->impl.init_ex_fn; + + return init_ex_pt(ctx, key, key_len, md, impl); +} + #else /* 1.1 */ extern HMAC_CTX *HMAC_CTX_new(void) __attribute__((weak, used)); extern void HMAC_CTX_free(HMAC_CTX *) __attribute__((weak, used)); -extern int HMAC_CTX_reset(HMAC_CTX *) __attribute__((weak, used)); /* 1.0.2 */ extern void HMAC_CTX_init(HMAC_CTX *) __attribute__((weak, used)); @@ -48,6 +69,23 @@ extern int HMAC_Update(HMAC_CTX *, const unsigned char *, size_t) __attribute__( extern int HMAC_Final(HMAC_CTX *, unsigned char *, unsigned int *) __attribute__((weak, used)); extern int HMAC_Init_ex(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *) __attribute__((weak, used)); +static int s_hmac_init_ex_openssl(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl) { + AWS_PRECONDITION(ctx); + if (key_len > INT_MAX) { + return 0; + } + + /*Note: unlike aws-lc and boringssl, openssl 1.1.1 and 1.0.2 take int as key + len arg. 
*/ + int (*init_ex_ptr)(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *) = + (int (*)(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *))g_aws_openssl_hmac_ctx_table->impl.init_ex_fn; + + return init_ex_ptr(ctx, key, (int)key_len, md, impl); +} + +#endif /* !OPENSSL_IS_AWSLC && !OPENSSL_IS_BORINGSSL*/ + +#if !defined(OPENSSL_IS_AWSLC) /* libcrypto 1.1 stub for init */ static void s_hmac_ctx_init_noop(HMAC_CTX *ctx) { (void)ctx; @@ -57,7 +95,9 @@ static void s_hmac_ctx_init_noop(HMAC_CTX *ctx) { static void s_hmac_ctx_clean_up_noop(HMAC_CTX *ctx) { (void)ctx; } +#endif +#if defined(OPENSSL_IS_OPENSSL) /* libcrypto 1.0 shim for new */ static HMAC_CTX *s_hmac_ctx_new(void) { AWS_PRECONDITION( @@ -79,18 +119,6 @@ static void s_hmac_ctx_free(HMAC_CTX *ctx) { aws_mem_release(s_libcrypto_allocator, ctx); } -/* libcrypto 1.0 shim for reset, matches HMAC_CTX_reset semantics */ -static int s_hmac_ctx_reset(HMAC_CTX *ctx) { - AWS_PRECONDITION(ctx); - AWS_PRECONDITION( - g_aws_openssl_hmac_ctx_table->init_fn != s_hmac_ctx_init_noop && - g_aws_openssl_hmac_ctx_table->clean_up_fn != s_hmac_ctx_clean_up_noop && - "libcrypto 1.0 reset called on libcrypto 1.1 vtable"); - g_aws_openssl_hmac_ctx_table->clean_up_fn(ctx); - g_aws_openssl_hmac_ctx_table->init_fn(ctx); - return 1; -} - #endif /* !OPENSSL_IS_AWSLC */ enum aws_libcrypto_version { @@ -98,15 +126,16 @@ enum aws_libcrypto_version { AWS_LIBCRYPTO_1_0_2, AWS_LIBCRYPTO_1_1_1, AWS_LIBCRYPTO_LC, -} s_libcrypto_version = AWS_LIBCRYPTO_NONE; + AWS_LIBCRYPTO_BORINGSSL +}; bool s_resolve_hmac_102(void *module) { -#if !defined(OPENSSL_IS_AWSLC) +#if defined(OPENSSL_IS_OPENSSL) hmac_ctx_init init_fn = (hmac_ctx_init)HMAC_CTX_init; hmac_ctx_clean_up clean_up_fn = (hmac_ctx_clean_up)HMAC_CTX_cleanup; - hmac_ctx_update update_fn = (hmac_ctx_update)HMAC_Update; - hmac_ctx_final final_fn = (hmac_ctx_final)HMAC_Final; - hmac_ctx_init_ex init_ex_fn = (hmac_ctx_init_ex)HMAC_Init_ex; + hmac_update update_fn = 
(hmac_update)HMAC_Update; + hmac_final final_fn = (hmac_final)HMAC_Final; + hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? */ bool has_102_symbols = init_fn && clean_up_fn && update_fn && final_fn && init_ex_fn; @@ -126,7 +155,6 @@ bool s_resolve_hmac_102(void *module) { if (init_fn) { hmac_ctx_table.new_fn = (hmac_ctx_new)s_hmac_ctx_new; - hmac_ctx_table.reset_fn = (hmac_ctx_reset)s_hmac_ctx_reset; hmac_ctx_table.free_fn = s_hmac_ctx_free; hmac_ctx_table.init_fn = init_fn; hmac_ctx_table.clean_up_fn = clean_up_fn; @@ -141,22 +169,20 @@ bool s_resolve_hmac_102(void *module) { } bool s_resolve_hmac_111(void *module) { -#if !defined(OPENSSL_IS_AWSLC) +#if defined(OPENSSL_IS_OPENSSL) hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; - hmac_ctx_reset reset_fn = (hmac_ctx_reset)HMAC_CTX_reset; - hmac_ctx_update update_fn = (hmac_ctx_update)HMAC_Update; - hmac_ctx_final final_fn = (hmac_ctx_final)HMAC_Final; - hmac_ctx_init_ex init_ex_fn = (hmac_ctx_init_ex)HMAC_Init_ex; + hmac_update update_fn = (hmac_update)HMAC_Update; + hmac_final final_fn = (hmac_final)HMAC_Final; + hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? 
*/ - bool has_111_symbols = new_fn && free_fn && update_fn && final_fn && init_ex_fn && reset_fn; + bool has_111_symbols = new_fn && free_fn && update_fn && final_fn && init_ex_fn; if (has_111_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static libcrypto 1.1.1 HMAC symbols"); } else { *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); - *(void **)(&reset_fn) = dlsym(module, "HMAC_CTX_reset"); *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); @@ -168,13 +194,13 @@ bool s_resolve_hmac_111(void *module) { if (new_fn) { hmac_ctx_table.new_fn = new_fn; - hmac_ctx_table.reset_fn = reset_fn; hmac_ctx_table.free_fn = free_fn; hmac_ctx_table.init_fn = s_hmac_ctx_init_noop; hmac_ctx_table.clean_up_fn = s_hmac_ctx_clean_up_noop; hmac_ctx_table.update_fn = update_fn; hmac_ctx_table.final_fn = final_fn; - hmac_ctx_table.init_ex_fn = init_ex_fn; + hmac_ctx_table.init_ex_fn = s_hmac_init_ex_openssl; + hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; return true; } @@ -188,13 +214,12 @@ bool s_resolve_hmac_lc(void *module) { hmac_ctx_clean_up clean_up_fn = (hmac_ctx_clean_up)HMAC_CTX_cleanup; hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; - hmac_ctx_reset reset_fn = (hmac_ctx_reset)HMAC_CTX_reset; - hmac_ctx_update update_fn = (hmac_ctx_update)HMAC_Update; - hmac_ctx_final final_fn = (hmac_ctx_final)HMAC_Final; - hmac_ctx_init_ex init_ex_fn = (hmac_ctx_init_ex)HMAC_Init_ex; + hmac_update update_fn = (hmac_update)HMAC_Update; + hmac_final final_fn = (hmac_final)HMAC_Final; + hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? 
*/ - bool has_awslc_symbols = new_fn && free_fn && update_fn && final_fn && init_fn && init_ex_fn && reset_fn; + bool has_awslc_symbols = new_fn && free_fn && update_fn && final_fn && init_fn && init_ex_fn; /* If symbols aren't already found, try to find the requested version */ /* when built as a shared lib, and multiple versions of libcrypto are possibly @@ -203,7 +228,6 @@ bool s_resolve_hmac_lc(void *module) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static aws-lc HMAC symbols"); } else { *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); - *(void **)(&reset_fn) = dlsym(module, "HMAC_CTX_reset"); *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); @@ -216,13 +240,53 @@ bool s_resolve_hmac_lc(void *module) { if (new_fn) { /* Fill out the vtable for the requested version */ hmac_ctx_table.new_fn = new_fn; - hmac_ctx_table.reset_fn = reset_fn; hmac_ctx_table.free_fn = free_fn; hmac_ctx_table.init_fn = init_fn; hmac_ctx_table.clean_up_fn = clean_up_fn; hmac_ctx_table.update_fn = update_fn; hmac_ctx_table.final_fn = final_fn; - hmac_ctx_table.init_ex_fn = init_ex_fn; + hmac_ctx_table.init_ex_fn = s_hmac_init_ex_bssl; + hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; + g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; + return true; + } +#endif + return false; +} + +bool s_resolve_hmac_boringssl(void *module) { +#if defined(OPENSSL_IS_BORINGSSL) + hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; + hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; + hmac_update update_fn = (hmac_update)HMAC_Update; + hmac_final final_fn = (hmac_final)HMAC_Final; + hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; + + /* were symbols bound by static linking? 
*/ + bool has_bssl_symbols = new_fn && free_fn && update_fn && final_fn && init_ex_fn; + + if (has_bssl_symbols) { + AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static boringssl HMAC symbols"); + } else { + *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); + *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); + *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); + *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); + *(void **)(&init_ex_fn) = dlsym(module, "HMAC_Init_ex"); + if (new_fn) { + AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic boringssl HMAC symbols"); + } + } + + if (new_fn) { + hmac_ctx_table.new_fn = new_fn; + hmac_ctx_table.free_fn = free_fn; + hmac_ctx_table.init_fn = s_hmac_ctx_init_noop; + hmac_ctx_table.clean_up_fn = s_hmac_ctx_clean_up_noop; + hmac_ctx_table.update_fn = update_fn; + hmac_ctx_table.final_fn = final_fn; + hmac_ctx_table.init_ex_fn = s_hmac_init_ex_bssl; + hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; return true; } @@ -238,6 +302,8 @@ static enum aws_libcrypto_version s_resolve_libcrypto_hmac(enum aws_libcrypto_ve return s_resolve_hmac_111(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_1_0_2: return s_resolve_hmac_102(module) ? version : AWS_LIBCRYPTO_NONE; + case AWS_LIBCRYPTO_BORINGSSL: + return s_resolve_hmac_boringssl(module) ? 
version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_NONE: AWS_FATAL_ASSERT(!"Attempted to resolve invalid libcrypto HMAC API version AWS_LIBCRYPTO_NONE"); } @@ -386,6 +452,14 @@ bool s_resolve_md_lc(void *module) { return false; } +bool s_resolve_md_boringssl(void *module) { +#if !defined(OPENSSL_IS_AWSLC) + return s_resolve_md_111(module); +#else + return false; +#endif +} + static enum aws_libcrypto_version s_resolve_libcrypto_md(enum aws_libcrypto_version version, void *module) { switch (version) { case AWS_LIBCRYPTO_LC: @@ -394,6 +468,8 @@ static enum aws_libcrypto_version s_resolve_libcrypto_md(enum aws_libcrypto_vers return s_resolve_md_111(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_1_0_2: return s_resolve_md_102(module) ? version : AWS_LIBCRYPTO_NONE; + case AWS_LIBCRYPTO_BORINGSSL: + return s_resolve_md_boringssl(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_NONE: AWS_FATAL_ASSERT(!"Attempted to resolve invalid libcrypto MD API version AWS_LIBCRYPTO_NONE"); } @@ -479,31 +555,26 @@ static enum aws_libcrypto_version s_resolve_libcrypto_lib(void) { return AWS_LIBCRYPTO_NONE; } -static void *s_libcrypto_module = NULL; - static enum aws_libcrypto_version s_resolve_libcrypto(void) { - if (s_libcrypto_version != AWS_LIBCRYPTO_NONE) { - return s_libcrypto_version; - } - /* Try to auto-resolve against what's linked in/process space */ AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "searching process and loaded modules"); void *process = dlopen(NULL, RTLD_NOW); -#if 0 - // dlopen is not supported in musl. It's ok to pass NULL to s_resolve_libcrypto_symbols, - // as dlsym handles it well according to man. 
- AWS_FATAL_ASSERT(process && "Unable to load symbols from process space"); -#endif enum aws_libcrypto_version result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_LC, process); if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find aws-lc symbols linked"); + result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_BORINGSSL, process); + } + if (result == AWS_LIBCRYPTO_NONE) { + AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find boringssl symbols linked"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_0_2, process); } if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find libcrypto 1.0.2 symbols linked"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_1_1, process); } - dlclose(process); + if (process) { + dlclose(process); + } if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find libcrypto 1.1.1 symbols linked"); @@ -523,7 +594,7 @@ static enum aws_libcrypto_version s_resolve_libcrypto(void) { #endif /* Openssl 1.0.x requires special handling for its locking callbacks or else it's not thread safe */ -#if !defined(OPENSSL_IS_AWSLC) +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) static struct aws_mutex *s_libcrypto_locks = NULL; static void s_locking_fn(int mode, int n, const char *unused0, int unused1) { @@ -550,7 +621,7 @@ void aws_cal_platform_init(struct aws_allocator *allocator) { s_libcrypto_allocator = allocator; -#if !defined(OPENSSL_IS_AWSLC) +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) /* Ensure that libcrypto 1.0.2 has working locking mechanisms. This code is macro'ed * by libcrypto to be a no-op on 1.1.1 */ if (!CRYPTO_get_locking_callback()) { @@ -572,8 +643,37 @@ void aws_cal_platform_init(struct aws_allocator *allocator) { #endif } +/* + * Shutdown any resources before unloading CRT (ex. dlclose). + * This is currently aws-lc specific. + * Ex. 
why we need it: + * aws-lc uses thread local data extensively and registers thread atexit + callback to clean it up. + * there are cases where crt gets dlopen'ed and then dlclose'ed within a larger program + * (ex. nodejs workers). + * with glibc, dlclose actually removes symbols from global space (musl does not). + * once crt is unloaded, thread atexit will no longer point at a valid aws-lc + * symbol and will happily crash when thread is closed. + * AWSLC_thread_local_shutdown was added by aws-lc to let teams remove thread + * local data manually before lib is unloaded. + * We can't call AWSLC_thread_local_shutdown in cal cleanup because it renders + * aws-lc unusable and there is no way to reinitialize aws-lc to a working state, + * i.e. everything that depends on aws-lc stops working after shutdown (ex. curl). + * So instead rely on GCC/Clang destructor extension to shut down right before + * crt gets unloaded. Does not work on msvc, but that's a bridge we can cross at + * a later date (since we don't support aws-lc on win right now) + * TODO: do already init'ed check on lc similar to what we do for s2n, so we + * only shut down when we initialized aws-lc. currently not possible because + * there is no way to check that aws-lc has been initialized. 
+ */ +void __attribute__((destructor)) s_cal_crypto_shutdown(void) { +#if defined(OPENSSL_IS_AWSLC) + AWSLC_thread_local_shutdown(); +#endif +} + void aws_cal_platform_clean_up(void) { -#if !defined(OPENSSL_IS_AWSLC) +#if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) if (CRYPTO_get_locking_callback() == s_locking_fn) { CRYPTO_set_locking_callback(NULL); size_t lock_count = (size_t)CRYPTO_num_locks(); @@ -588,12 +688,19 @@ void aws_cal_platform_clean_up(void) { } #endif - if (s_libcrypto_module) { - dlclose(s_libcrypto_module); - } +#if defined(OPENSSL_IS_AWSLC) + AWSLC_thread_local_clear(); +#endif s_libcrypto_allocator = NULL; } + +void aws_cal_platform_thread_clean_up(void) { +#if defined(OPENSSL_IS_AWSLC) + AWSLC_thread_local_clear(); +#endif +} + #if !defined(__GNUC__) || (__GNUC__ >= 4 && __GNUC_MINOR__ > 1) # pragma GCC diagnostic pop #endif diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/openssl_rsa.c b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_rsa.c new file mode 100644 index 00000000000..9d891677558 --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/source/unix/openssl_rsa.c @@ -0,0 +1,450 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/cal/private/opensslcrypto_common.h> +#include <aws/cal/private/rsa.h> + +#include <aws/cal/cal.h> +#include <aws/common/encoding.h> + +#define OPENSSL_SUPPRESS_DEPRECATED +#include <openssl/err.h> +#include <openssl/evp.h> + +#if defined(OPENSSL_IS_OPENSSL) +/*Error defines were part of evp.h in 1.0.x and were moved to evperr.h in 1.1.0*/ +# if OPENSSL_VERSION_NUMBER >= 0x10100000L +# include <openssl/evperr.h> +# endif +#else +# error #include <openssl/evp_errors.h> +#endif + +#include <openssl/rsa.h> + +struct lc_rsa_key_pair { + struct aws_rsa_key_pair base; + EVP_PKEY *key; +}; + +static void s_rsa_destroy_key(void *key_pair) { + if (key_pair == NULL) { + return; + } + + struct aws_rsa_key_pair *base = key_pair; + struct lc_rsa_key_pair *impl = base->impl; + + if (impl->key != NULL) { + EVP_PKEY_free(impl->key); + } + + aws_rsa_key_pair_base_clean_up(base); + + aws_mem_release(base->allocator, impl); +} + +/* + * Transforms evp error code into crt error code and raises it as necessary. + * All evp functions follow the same: + * >= 1 for success + * <= 0 for failure + * -2 always indicates incorrect algo for operation + */ +static int s_reinterpret_evp_error_as_crt(int evp_error, const char *function_name) { + if (evp_error > 0) { + return AWS_OP_SUCCESS; + } + + /* AWS-LC/BoringSSL error code is uint32_t, but OpenSSL uses unsigned long. 
*/ +#if defined(OPENSSL_IS_OPENSSL) + uint32_t error = ERR_peek_error(); +#else + unsigned long error = ERR_peek_error(); +#endif + + int crt_error = AWS_OP_ERR; + const char *error_message = ERR_reason_error_string(error); + + if (evp_error == -2) { + crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; + goto on_error; + } + + if (ERR_GET_LIB(error) == ERR_LIB_EVP) { + switch (ERR_GET_REASON(error)) { + case EVP_R_BUFFER_TOO_SMALL: { + crt_error = AWS_ERROR_SHORT_BUFFER; + goto on_error; + } + case EVP_R_UNSUPPORTED_ALGORITHM: { + crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; + goto on_error; + } + } + } + + crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED; + +on_error: + AWS_LOGF_ERROR( + AWS_LS_CAL_RSA, + "%s() failed. returned: %d extended error:%lu(%s) aws_error:%s", + function_name, + evp_error, + (unsigned long)error, + error_message == NULL ? "" : error_message, + aws_error_name(crt_error)); + + return aws_raise_error(crt_error); +} + +static int s_set_encryption_ctx_from_algo(EVP_PKEY_CTX *ctx, enum aws_rsa_encryption_algorithm algorithm) { + if (algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5) { + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { + return AWS_OP_ERR; + } + + } else if (algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 || algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512) { + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { + return AWS_OP_ERR; + } + + const EVP_MD *md = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? 
EVP_sha256() : EVP_sha512(); + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_CTX_set_rsa_oaep_md(ctx, md), "EVP_PKEY_CTX_set_rsa_oaep_md")) { + return AWS_OP_ERR; + } + } else { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + return AWS_OP_SUCCESS; +} + +static int s_rsa_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out) { + struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; + + EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); + if (ctx == NULL) { + return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + } + + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_encrypt_init(ctx), "EVP_PKEY_encrypt_init")) { + goto on_error; + } + + if (s_set_encryption_ctx_from_algo(ctx, algorithm)) { + goto on_error; + } + + size_t needed_buffer_len = 0; + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_encrypt(ctx, NULL, &needed_buffer_len, plaintext.ptr, plaintext.len), + "EVP_PKEY_encrypt get length")) { + goto on_error; + } + + size_t ct_len = out->capacity - out->len; + if (needed_buffer_len > ct_len) { + /* + * OpenSSL 3 seems to no longer fail if the buffer is too short. + * Instead it seems to write out enough data to fill the buffer and then + * updates the out_len to full buffer. It does not seem to corrupt + * memory after the buffer, but behavior is non-ideal. + * Let get length needed for buffer from api first and then manually ensure that + * buffer we have is big enough. 
+ */ + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_encrypt(ctx, out->buffer + out->len, &ct_len, plaintext.ptr, plaintext.len), "EVP_PKEY_encrypt")) { + goto on_error; + } + out->len += ct_len; + + EVP_PKEY_CTX_free(ctx); + return AWS_OP_SUCCESS; + +on_error: + EVP_PKEY_CTX_free(ctx); + return AWS_OP_ERR; +} + +static int s_rsa_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out) { + struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; + + EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); + if (ctx == NULL) { + return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + } + + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_decrypt_init(ctx), "EVP_PKEY_decrypt_init")) { + goto on_error; + } + + if (s_set_encryption_ctx_from_algo(ctx, algorithm)) { + goto on_error; + } + + size_t needed_buffer_len = 0; + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_decrypt(ctx, NULL, &needed_buffer_len, ciphertext.ptr, ciphertext.len), + "EVP_PKEY_decrypt get length")) { + goto on_error; + } + + size_t ct_len = out->capacity - out->len; + if (needed_buffer_len > ct_len) { + /* + * manual short buffer length check for OpenSSL 3. 
+ * refer to encrypt implementation for more details + */ + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_decrypt(ctx, out->buffer + out->len, &ct_len, ciphertext.ptr, ciphertext.len), + "EVP_PKEY_decrypt")) { + goto on_error; + } + out->len += ct_len; + + EVP_PKEY_CTX_free(ctx); + return AWS_OP_SUCCESS; + +on_error: + EVP_PKEY_CTX_free(ctx); + return AWS_OP_ERR; +} + +static int s_set_signature_ctx_from_algo(EVP_PKEY_CTX *ctx, enum aws_rsa_signature_algorithm algorithm) { + if (algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256) { + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { + return AWS_OP_ERR; + } + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()), "EVP_PKEY_CTX_set_signature_md")) { + return AWS_OP_ERR; + } + } else if (algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256) { + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { + return AWS_OP_ERR; + } + +#if defined(OPENSSL_IS_BORINGSSL) || OPENSSL_VERSION_NUMBER < 0x10100000L + int saltlen = -1; /* RSA_PSS_SALTLEN_DIGEST not defined in BoringSSL and old versions of openssl */ +#else + int saltlen = RSA_PSS_SALTLEN_DIGEST; +#endif + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, saltlen), "EVP_PKEY_CTX_set_rsa_pss_saltlen")) { + return AWS_OP_ERR; + } + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()), "EVP_PKEY_CTX_set_signature_md")) { + return AWS_OP_ERR; + } + } else { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + + return AWS_OP_SUCCESS; +} + +static int s_rsa_sign( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out) { + struct lc_rsa_key_pair *key_pair_impl = 
key_pair->impl; + + EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); + if (ctx == NULL) { + return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + } + + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_sign_init(ctx), "EVP_PKEY_sign_init")) { + goto on_error; + } + + if (s_set_signature_ctx_from_algo(ctx, algorithm)) { + goto on_error; + } + + size_t needed_buffer_len = 0; + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_sign(ctx, NULL, &needed_buffer_len, digest.ptr, digest.len), "EVP_PKEY_sign get length")) { + goto on_error; + } + + size_t ct_len = out->capacity - out->len; + if (needed_buffer_len > ct_len) { + /* + * manual short buffer length check for OpenSSL 3. + * refer to encrypt implementation for more details. + * OpenSSL3 actually does throw an error here, but error code comes from + * component that does not exist in OpenSSL 1.x. So check manually right + * now and we can figure out how to handle it better, once we can + * properly support OpenSSL 3. + */ + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto on_error; + } + + if (s_reinterpret_evp_error_as_crt( + EVP_PKEY_sign(ctx, out->buffer + out->len, &ct_len, digest.ptr, digest.len), "EVP_PKEY_sign")) { + goto on_error; + } + out->len += ct_len; + + EVP_PKEY_CTX_free(ctx); + return AWS_OP_SUCCESS; + +on_error: + EVP_PKEY_CTX_free(ctx); + return AWS_OP_ERR; +} + +static int s_rsa_verify( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature) { + struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; + + EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); + if (ctx == NULL) { + return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + } + + if (s_reinterpret_evp_error_as_crt(EVP_PKEY_verify_init(ctx), "EVP_PKEY_verify_init")) { + goto on_error; + } + + if (s_set_signature_ctx_from_algo(ctx, algorithm)) { + goto on_error; + } + + int error_code = 
EVP_PKEY_verify(ctx, signature.ptr, signature.len, digest.ptr, digest.len); + EVP_PKEY_CTX_free(ctx); + + /* Verify errors slightly differently from the rest of evp functions. + * 0 indicates signature does not pass verification, it's not necessarily an error. */ + if (error_code > 0) { + return AWS_OP_SUCCESS; + } else if (error_code == 0) { + return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); + } else { + return s_reinterpret_evp_error_as_crt(error_code, "EVP_PKEY_verify"); + } + +on_error: + EVP_PKEY_CTX_free(ctx); + return AWS_OP_ERR; +} + +static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { + .encrypt = s_rsa_encrypt, + .decrypt = s_rsa_decrypt, + .sign = s_rsa_sign, + .verify = s_rsa_verify, +}; + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + struct lc_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct lc_rsa_key_pair)); + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); + + RSA *rsa = NULL; + EVP_PKEY *private_key = NULL; + + if (d2i_RSAPrivateKey(&rsa, (const uint8_t **)&key.ptr, key.len) == NULL) { + aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + goto on_error; + } + + private_key = EVP_PKEY_new(); + if (private_key == NULL || EVP_PKEY_assign_RSA(private_key, rsa) == 0) { + RSA_free(rsa); + aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + goto on_error; + } + + key_pair_impl->key = private_key; + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + key_pair_impl->base.key_size_in_bits = EVP_PKEY_bits(key_pair_impl->key); + + return &key_pair_impl->base; + +on_error: + if (private_key) { + EVP_PKEY_free(private_key); + } + + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; 
+} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + struct lc_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct lc_rsa_key_pair)); + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); + + RSA *rsa = NULL; + EVP_PKEY *public_key = NULL; + + if (d2i_RSAPublicKey(&rsa, (const uint8_t **)&key.ptr, key.len) == NULL) { + aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + goto on_error; + } + + public_key = EVP_PKEY_new(); + if (public_key == NULL || EVP_PKEY_assign_RSA(public_key, rsa) == 0) { + RSA_free(rsa); + aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); + goto on_error; + } + + key_pair_impl->key = public_key; + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + key_pair_impl->base.key_size_in_bits = EVP_PKEY_bits(key_pair_impl->key); + + return &key_pair_impl->base; + +on_error: + if (public_key) { + EVP_PKEY_free(public_key); + } + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c b/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c index 931d705bd29..f8d33316ea8 100644 --- a/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c +++ b/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_ecc.c @@ -7,6 +7,7 @@ #include <aws/cal/cal.h> #include <aws/cal/private/der.h> +#define OPENSSL_SUPPRESS_DEPRECATED #include <openssl/bn.h> #include <openssl/ec.h> #include <openssl/ecdsa.h> diff --git a/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hmac.c b/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hmac.c index 5c5cc3686c7..732ead42a3f 100644 --- 
a/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hmac.c +++ b/contrib/restricted/aws/aws-c-cal/source/unix/opensslcrypto_hmac.c @@ -73,7 +73,7 @@ struct aws_hmac *aws_sha256_hmac_default_new(struct aws_allocator *allocator, co hmac->impl = ctx; hmac->good = true; - if (!g_aws_openssl_hmac_ctx_table->init_ex_fn(ctx, secret->ptr, (int)secret->len, EVP_sha256(), NULL)) { + if (!g_aws_openssl_hmac_ctx_table->init_ex_fn(ctx, secret->ptr, secret->len, EVP_sha256(), NULL)) { s_destroy(hmac); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c index aeb646e66a3..0229de3a80d 100644 --- a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c +++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_aes.c @@ -277,9 +277,12 @@ static void s_clear_reusable_components(struct aws_symmetric_cipher *cipher) { } aws_byte_buf_secure_zero(&cipher_impl->overflow); - aws_byte_buf_secure_zero(&cipher_impl->working_mac_buffer); - /* windows handles this, just go ahead and tell the API it's got a length. */ - cipher_impl->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE; + + if (cipher_impl->working_mac_buffer.capacity != 0) { + aws_byte_buf_secure_zero(&cipher_impl->working_mac_buffer); + /* windows handles this, just go ahead and tell the API it's got a length. 
*/ + cipher_impl->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE; + } } static int s_reset_cbc_cipher(struct aws_symmetric_cipher *cipher) { diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c index a9e890d0556..268b29b5111 100644 --- a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c +++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_ecc.c @@ -130,11 +130,11 @@ static int s_sign_message( struct aws_byte_cursor integer_cur = aws_byte_cursor_from_array(temp_signature_buf.buffer, coordinate_len); /* trim off the leading zero padding for DER encoding */ integer_cur = aws_byte_cursor_left_trim_pred(&integer_cur, s_trim_zeros_predicate); - aws_der_encoder_write_integer(encoder, integer_cur); + aws_der_encoder_write_unsigned_integer(encoder, integer_cur); integer_cur = aws_byte_cursor_from_array(temp_signature_buf.buffer + coordinate_len, coordinate_len); /* trim off the leading zero padding for DER encoding */ integer_cur = aws_byte_cursor_left_trim_pred(&integer_cur, s_trim_zeros_predicate); - aws_der_encoder_write_integer(encoder, integer_cur); + aws_der_encoder_write_unsigned_integer(encoder, integer_cur); aws_der_encoder_end_sequence(encoder); struct aws_byte_cursor signature_out_cur; @@ -178,8 +178,7 @@ static int s_append_coordinate( size_t leading_zero_count = coordinate_size - coordinate->len; AWS_FATAL_ASSERT(leading_zero_count + buffer->len <= buffer->capacity); - memset(buffer->buffer + buffer->len, 0, leading_zero_count); - buffer->len += leading_zero_count; + aws_byte_buf_write_u8_n(buffer, 0x0, leading_zero_count); } return aws_byte_buf_append(buffer, coordinate); @@ -216,7 +215,7 @@ static int s_verify_signature( /* there will be two coordinates. They need to be concatenated together. 
*/ struct aws_byte_cursor coordinate; AWS_ZERO_STRUCT(coordinate); - if (aws_der_decoder_tlv_integer(decoder, &coordinate)) { + if (aws_der_decoder_tlv_unsigned_integer(decoder, &coordinate)) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } @@ -230,7 +229,7 @@ static int s_verify_signature( goto error; } AWS_ZERO_STRUCT(coordinate); - if (aws_der_decoder_tlv_integer(decoder, &coordinate)) { + if (aws_der_decoder_tlv_unsigned_integer(decoder, &coordinate)) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c index decedcdafa2..f2da2805673 100644 --- a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c +++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_platform_init.c @@ -10,3 +10,5 @@ void aws_cal_platform_init(struct aws_allocator *allocator) { } void aws_cal_platform_clean_up(void) {} + +void aws_cal_platform_thread_clean_up(void) {} diff --git a/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_rsa.c b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_rsa.c new file mode 100644 index 00000000000..d9e7c8d229f --- /dev/null +++ b/contrib/restricted/aws/aws-c-cal/source/windows/bcrypt_rsa.c @@ -0,0 +1,422 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/cal/private/rsa.h> + +#include <aws/cal/cal.h> +#include <aws/cal/private/der.h> +#include <aws/common/encoding.h> + +#define WIN32_NO_STATUS +#include <windows.h> +#undef WIN32_NO_STATUS + +#include <bcrypt.h> +#include <ntstatus.h> + +static BCRYPT_ALG_HANDLE s_rsa_alg = NULL; + +static aws_thread_once s_rsa_thread_once = AWS_THREAD_ONCE_STATIC_INIT; + +static void s_load_alg_handle(void *user_data) { + (void)user_data; + /* this function is incredibly slow, LET IT LEAK*/ + NTSTATUS status = BCryptOpenAlgorithmProvider(&s_rsa_alg, BCRYPT_RSA_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); + AWS_FATAL_ASSERT(s_rsa_alg && "BCryptOpenAlgorithmProvider() failed"); + AWS_FATAL_ASSERT(BCRYPT_SUCCESS(status)); +} + +struct bcrypt_rsa_key_pair { + struct aws_rsa_key_pair base; + BCRYPT_KEY_HANDLE key_handle; + struct aws_byte_buf key_buf; +}; + +static void s_rsa_destroy_key(void *key_pair) { + if (key_pair == NULL) { + return; + } + + struct aws_rsa_key_pair *base = key_pair; + struct bcrypt_rsa_key_pair *impl = base->impl; + + if (impl->key_handle) { + BCryptDestroyKey(impl->key_handle); + } + aws_byte_buf_clean_up_secure(&impl->key_buf); + + aws_rsa_key_pair_base_clean_up(base); + + aws_mem_release(base->allocator, impl); +} + +/* + * Transforms bcrypt error code into crt error code and raises it as necessary. + */ +static int s_reinterpret_bc_error_as_crt(NTSTATUS error, const char *function_name) { + if (BCRYPT_SUCCESS(error)) { + return AWS_OP_SUCCESS; + } + + int crt_error = AWS_OP_ERR; + switch (error) { + case STATUS_BUFFER_TOO_SMALL: { + crt_error = AWS_ERROR_SHORT_BUFFER; + goto on_error; + } + case STATUS_NOT_SUPPORTED: { + crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; + goto on_error; + } + } + + crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED; + +on_error: + AWS_LOGF_ERROR( + AWS_LS_CAL_RSA, "%s() failed. 
returned: %X aws_error:%s", function_name, error, aws_error_name(crt_error)); + + return aws_raise_error(crt_error); +} + +static int s_check_encryption_algorithm(enum aws_rsa_encryption_algorithm algorithm) { + if (algorithm != AWS_CAL_RSA_ENCRYPTION_PKCS1_5 && algorithm != AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 && + algorithm != AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512) { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + return AWS_OP_SUCCESS; +} + +static int s_rsa_encrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor plaintext, + struct aws_byte_buf *out) { + struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; + + if (s_check_encryption_algorithm(algorithm)) { + return AWS_OP_ERR; + } + + BCRYPT_OAEP_PADDING_INFO padding_info_oaep = { + .pszAlgId = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? BCRYPT_SHA256_ALGORITHM : BCRYPT_SHA512_ALGORITHM, + .pbLabel = NULL, + .cbLabel = 0}; + + ULONG length_written = 0; + NTSTATUS status = BCryptEncrypt( + key_pair_impl->key_handle, + plaintext.ptr, + (ULONG)plaintext.len, + algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? NULL : &padding_info_oaep, + NULL, + 0, + out->buffer + out->len, + (ULONG)(out->capacity - out->len), + &length_written, + algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_OAEP); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptEncrypt")) { + return AWS_OP_ERR; + } + + out->len += length_written; + return AWS_OP_SUCCESS; +} + +static int s_rsa_decrypt( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_encryption_algorithm algorithm, + struct aws_byte_cursor ciphertext, + struct aws_byte_buf *out) { + struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; + + /* There is a bug in old versions of BCryptDecrypt, where it does not return + * error status if out buffer is too short. So manually check that buffer is + * large enough. 
+ */ + if ((out->capacity - out->len) < aws_rsa_key_pair_block_length(key_pair)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + if (s_check_encryption_algorithm(algorithm)) { + return AWS_OP_ERR; + } + + BCRYPT_OAEP_PADDING_INFO padding_info_oaep = { + .pszAlgId = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? BCRYPT_SHA256_ALGORITHM : BCRYPT_SHA512_ALGORITHM, + .pbLabel = NULL, + .cbLabel = 0}; + + ULONG length_written = 0; + NTSTATUS status = BCryptDecrypt( + key_pair_impl->key_handle, + ciphertext.ptr, + (ULONG)ciphertext.len, + algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? NULL : &padding_info_oaep, + NULL, + 0, + out->buffer + out->len, + (ULONG)(out->capacity - out->len), + &length_written, + algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_OAEP); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptDecrypt")) { + return AWS_OP_ERR; + } + + out->len += length_written; + return AWS_OP_SUCCESS; +} + +union sign_padding_info { + BCRYPT_PKCS1_PADDING_INFO pkcs1; + BCRYPT_PSS_PADDING_INFO pss; +}; + +static int s_sign_padding_info_init(union sign_padding_info *info, enum aws_rsa_signature_algorithm algorithm) { + memset(info, 0, sizeof(union sign_padding_info)); + + if (algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256) { + info->pkcs1.pszAlgId = BCRYPT_SHA256_ALGORITHM; + return AWS_OP_SUCCESS; + } else if (algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256) { + info->pss.pszAlgId = BCRYPT_SHA256_ALGORITHM; + info->pss.cbSalt = 32; + return AWS_OP_SUCCESS; + } + + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); +} + +static int s_rsa_sign( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_buf *out) { + struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; + + union sign_padding_info padding_info; + if (s_sign_padding_info_init(&padding_info, algorithm)) { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + 
} + + ULONG length_written = 0; + NTSTATUS status = BCryptSignHash( + key_pair_impl->key_handle, + &padding_info, + digest.ptr, + (ULONG)digest.len, + out->buffer + out->len, + (ULONG)(out->capacity - out->len), + (ULONG *)&length_written, + algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_PSS); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptSignHash")) { + goto on_error; + } + + out->len += length_written; + + return AWS_OP_SUCCESS; + +on_error: + return AWS_OP_ERR; +} + +static int s_rsa_verify( + const struct aws_rsa_key_pair *key_pair, + enum aws_rsa_signature_algorithm algorithm, + struct aws_byte_cursor digest, + struct aws_byte_cursor signature) { + struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; + + /* BCrypt raises invalid argument if signature does not have correct size. + * Verify size here and raise appropriate error and treat all other errors + * from BCrypt (including invalid arg) in reinterp. */ + if (signature.len != aws_rsa_key_pair_signature_length(key_pair)) { + return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); + } + + union sign_padding_info padding_info; + if (s_sign_padding_info_init(&padding_info, algorithm)) { + return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); + } + /* okay, now we've got a windows compatible signature, let's verify it. */ + NTSTATUS status = BCryptVerifySignature( + key_pair_impl->key_handle, + &padding_info, + digest.ptr, + (ULONG)digest.len, + signature.ptr, + (ULONG)signature.len, + algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 ? 
BCRYPT_PAD_PKCS1 : BCRYPT_PAD_PSS); + + if (status == STATUS_INVALID_SIGNATURE) { + return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); + } + + if (s_reinterpret_bc_error_as_crt(status, "BCryptVerifySignature")) { + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; +} + +static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { + .encrypt = s_rsa_encrypt, + .decrypt = s_rsa_decrypt, + .sign = s_rsa_sign, + .verify = s_rsa_verify, +}; + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + + aws_thread_call_once(&s_rsa_thread_once, s_load_alg_handle, NULL); + struct bcrypt_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_rsa_key_pair)); + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); + + struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, key); + + if (!decoder) { + goto on_error; + } + + struct aws_rsa_private_key_pkcs1 private_key_data; + AWS_ZERO_STRUCT(private_key_data); + if (aws_der_decoder_load_private_rsa_pkcs1(decoder, &private_key_data)) { + goto on_error; + } + + /* Hard to predict final blob size, so use pkcs1 key size as upper bound. 
*/ + size_t total_buffer_size = key.len + sizeof(BCRYPT_RSAKEY_BLOB); + + aws_byte_buf_init(&key_pair_impl->key_buf, allocator, total_buffer_size); + + BCRYPT_RSAKEY_BLOB key_blob; + AWS_ZERO_STRUCT(key_blob); + key_blob.Magic = BCRYPT_RSAFULLPRIVATE_MAGIC; + key_blob.BitLength = (ULONG)private_key_data.modulus.len * 8; + key_blob.cbPublicExp = (ULONG)private_key_data.publicExponent.len; + key_blob.cbModulus = (ULONG)private_key_data.modulus.len; + key_blob.cbPrime1 = (ULONG)private_key_data.prime1.len; + key_blob.cbPrime2 = (ULONG)private_key_data.prime2.len; + + struct aws_byte_cursor header = aws_byte_cursor_from_array(&key_blob, sizeof(key_blob)); + aws_byte_buf_append(&key_pair_impl->key_buf, &header); + + LPCWSTR blob_type = BCRYPT_RSAFULLPRIVATE_BLOB; + ULONG flags = 0; + + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.publicExponent); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.modulus); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.prime1); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.prime2); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.exponent1); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.exponent2); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.coefficient); + aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.privateExponent); + + NTSTATUS status = BCryptImportKeyPair( + s_rsa_alg, + NULL, + blob_type, + &key_pair_impl->key_handle, + key_pair_impl->key_buf.buffer, + (ULONG)key_pair_impl->key_buf.len, + flags); + + if (s_reinterpret_bc_error_as_crt(status, "BCryptImportKeyPair")) { + goto on_error; + } + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + key_pair_impl->base.key_size_in_bits = private_key_data.modulus.len * 8; + + aws_der_decoder_destroy(decoder); + + return &key_pair_impl->base; + +on_error: + aws_der_decoder_destroy(decoder); + s_rsa_destroy_key(&key_pair_impl->base); + return 
NULL; +} + +struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( + struct aws_allocator *allocator, + struct aws_byte_cursor key) { + + aws_thread_call_once(&s_rsa_thread_once, s_load_alg_handle, NULL); + struct bcrypt_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_rsa_key_pair)); + + aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); + key_pair_impl->base.impl = key_pair_impl; + key_pair_impl->base.allocator = allocator; + aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); + + struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, key); + + if (!decoder) { + goto on_error; + } + + struct aws_rsa_public_key_pkcs1 public_key_data; + AWS_ZERO_STRUCT(public_key_data); + if (aws_der_decoder_load_public_rsa_pkcs1(decoder, &public_key_data)) { + goto on_error; + } + + /* Hard to predict final blob size, so use pkcs1 key size as upper bound. */ + size_t total_buffer_size = key.len + sizeof(BCRYPT_RSAKEY_BLOB); + + aws_byte_buf_init(&key_pair_impl->key_buf, allocator, total_buffer_size); + + BCRYPT_RSAKEY_BLOB key_blob; + AWS_ZERO_STRUCT(key_blob); + key_blob.Magic = BCRYPT_RSAPUBLIC_MAGIC; + key_blob.BitLength = (ULONG)public_key_data.modulus.len * 8; + key_blob.cbPublicExp = (ULONG)public_key_data.publicExponent.len; + key_blob.cbModulus = (ULONG)public_key_data.modulus.len; + + struct aws_byte_cursor header = aws_byte_cursor_from_array(&key_blob, sizeof(key_blob)); + aws_byte_buf_append(&key_pair_impl->key_buf, &header); + + LPCWSTR blob_type = BCRYPT_PUBLIC_KEY_BLOB; + ULONG flags = 0; + + aws_byte_buf_append(&key_pair_impl->key_buf, &public_key_data.publicExponent); + aws_byte_buf_append(&key_pair_impl->key_buf, &public_key_data.modulus); + + NTSTATUS status = BCryptImportKeyPair( + s_rsa_alg, + NULL, + blob_type, + &key_pair_impl->key_handle, + key_pair_impl->key_buf.buffer, + (ULONG)key_pair_impl->key_buf.len, + flags); + + if 
(s_reinterpret_bc_error_as_crt(status, "BCryptImportKeyPair")) { + goto on_error; + } + + key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; + key_pair_impl->base.key_size_in_bits = public_key_data.modulus.len * 8; + + aws_der_decoder_destroy(decoder); + + return &key_pair_impl->base; + +on_error: + aws_der_decoder_destroy(decoder); + s_rsa_destroy_key(&key_pair_impl->base); + return NULL; +} diff --git a/contrib/restricted/aws/aws-c-cal/ya.make b/contrib/restricted/aws/aws-c-cal/ya.make index dafca392e26..ab87627bd14 100644 --- a/contrib/restricted/aws/aws-c-cal/ya.make +++ b/contrib/restricted/aws/aws-c-cal/ya.make @@ -1,4 +1,4 @@ -# Generated by devtools/yamaker from nixpkgs 22.11. +# Generated by devtools/yamaker from nixpkgs 24.05. LIBRARY() @@ -9,9 +9,9 @@ LICENSE( LICENSE_TEXTS(.yandex_meta/licenses.list.txt) -VERSION(0.5.26) +VERSION(0.6.12) -ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-cal/archive/v0.5.26.tar.gz) +ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-cal/archive/v0.6.12.tar.gz) PEERDIR( contrib/libs/openssl @@ -30,6 +30,7 @@ CFLAGS( -DAWS_CAL_USE_IMPORT_EXPORT -DAWS_COMMON_USE_IMPORT_EXPORT -DHAVE_SYSCONF + -DINTEL_NO_ITTNOTIFY_API ) IF (OS_WINDOWS) @@ -44,6 +45,7 @@ SRCS( source/ecc.c source/hash.c source/hmac.c + source/rsa.c source/symmetric_cipher.c ) @@ -61,11 +63,13 @@ IF (OS_DARWIN) source/darwin/commoncrypto_sha1.c source/darwin/commoncrypto_sha256.c source/darwin/securityframework_ecc.c + source/darwin/securityframework_rsa.c ) ELSEIF (OS_LINUX) SRCS( source/unix/openssl_aes.c source/unix/openssl_platform_init.c + source/unix/openssl_rsa.c source/unix/opensslcrypto_ecc.c source/unix/opensslcrypto_hash.c source/unix/opensslcrypto_hmac.c @@ -77,6 +81,7 @@ ELSEIF (OS_WINDOWS) source/windows/bcrypt_hash.c source/windows/bcrypt_hmac.c source/windows/bcrypt_platform_init.c + source/windows/bcrypt_rsa.c ) ENDIF() diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/__init__.py 
b/contrib/restricted/aws/aws-c-common/.yandex_meta/__init__.py index 182f44862dc..2cd57a9d597 100644 --- a/contrib/restricted/aws/aws-c-common/.yandex_meta/__init__.py +++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/__init__.py @@ -46,7 +46,7 @@ def post_install(self): ) if_arm = Linkable( SRCS=[ - "source/arch/arm/asm/cpuid.c", + "source/arch/arm/auxv/cpuid.c", ] ) m.after( @@ -73,16 +73,12 @@ def post_install(self): ) # handle arch-specific CFLAGS - m.CFLAGS.remove("-DHAVE_MM256_EXTRACT_EPI64") - m.CFLAGS.remove("-DHAVE_AVX2_INTRINSICS") m.CFLAGS.remove("-DUSE_SIMD_ENCODING") m.after( "CFLAGS", Switch( ARCH_X86_64=Linkable( CFLAGS=[ - "-DHAVE_MM256_EXTRACT_EPI64", - "-DHAVE_AVX2_INTRINSICS", "-DUSE_SIMD_ENCODING", ], ) @@ -97,7 +93,11 @@ aws_c_common = CMakeNinjaNixProject( flags=["-DAWS_HAVE_EXECINFO=OFF"], copy_sources=[ "include/aws/common/*.inl", - "source/arch/arm/asm/**/*", + "include/aws/common/external/ittnotify.h", + "source/arch/arm/auxv/cpuid.c", + ], + disable_includes=[ + "legacy/ittnotify.h", ], install_targets=["aws-c-common"], post_install=post_install, diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report index 49a57a2b66c..e2d05281f04 100644 --- a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report +++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report @@ -59,6 +59,7 @@ BELONGS ya.make include/aws/common/common.h [5:5] include/aws/common/condition_variable.h [5:5] include/aws/common/cpuid.h [4:4] + include/aws/common/cross_process_lock.h [4:4] include/aws/common/date_time.h [4:4] include/aws/common/device_random.h [4:4] include/aws/common/encoding.h [5:5] @@ -70,6 +71,7 @@ BELONGS ya.make include/aws/common/fifo_cache.h [4:4] include/aws/common/file.h [4:4] include/aws/common/hash_table.h [5:5] + include/aws/common/host_utils.h [4:4] include/aws/common/json.h [5:5] 
include/aws/common/lifo_cache.h [4:4] include/aws/common/linked_hash_table.h [4:4] @@ -101,6 +103,7 @@ BELONGS ya.make include/aws/common/private/dlloads.h [4:4] include/aws/common/private/hash_table_impl.h [5:5] include/aws/common/private/json_impl.h [5:5] + include/aws/common/private/system_info_priv.h [4:4] include/aws/common/private/thread_shared.h [4:4] include/aws/common/private/xml_parser_impl.h [5:5] include/aws/common/process.h [4:4] @@ -115,6 +118,7 @@ BELONGS ya.make include/aws/common/string.h [4:4] include/aws/common/string.inl [4:4] include/aws/common/system_info.h [5:5] + include/aws/common/system_resource_util.h [5:5] include/aws/common/task_scheduler.h [5:5] include/aws/common/thread.h [5:5] include/aws/common/thread_scheduler.h [4:4] @@ -126,7 +130,7 @@ BELONGS ya.make include/aws/common/zero.inl [5:5] source/allocator.c [2:2] source/allocator_sba.c [2:2] - source/arch/arm/asm/cpuid.c [2:2] + source/arch/arm/auxv/cpuid.c [2:2] source/arch/intel/asm/cpuid.c [2:2] source/arch/intel/cpuid.c [2:2] source/arch/intel/encoding_avx2.c [2:2] @@ -145,9 +149,11 @@ BELONGS ya.make source/fifo_cache.c [2:2] source/file.c [2:2] source/hash_table.c [2:2] + source/host_utils.c [2:2] source/json.c [2:2] source/lifo_cache.c [2:2] source/linked_hash_table.c [2:2] + source/linux/system_info.c [2:2] source/log_channel.c [2:2] source/log_formatter.c [2:2] source/log_writer.c [2:2] @@ -157,6 +163,7 @@ BELONGS ya.make source/memtrace.c [2:2] source/posix/clock.c [2:2] source/posix/condition_variable.c [2:2] + source/posix/cross_process_lock.c [2:2] source/posix/device_random.c [2:2] source/posix/environment.c [2:2] source/posix/file.c [2:2] @@ -164,6 +171,7 @@ BELONGS ya.make source/posix/process.c [2:2] source/posix/rw_lock.c [2:2] source/posix/system_info.c [2:2] + source/posix/system_resource_utils.c [2:2] source/posix/thread.c [2:2] source/posix/time.c [2:2] source/priority_queue.c [2:2] @@ -173,6 +181,7 @@ BELONGS ya.make source/ring_buffer.c [2:2] 
source/statistics.c [2:2] source/string.c [2:2] + source/system_info.c [2:2] source/task_scheduler.c [2:2] source/thread_scheduler.c [2:2] source/thread_shared.c [2:2] @@ -191,6 +200,30 @@ BELONGS ya.make Files with this license: source/posix/time.c [15:15] +KEEP COPYRIGHT_SERVICE_LABEL 6415d26683d228a47ae78c42308409b0 +BELONGS ya.make + License text: + Copyright (C) 2005-2019 Intel Corporation + SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause + Scancode info: + Original SPDX id: COPYRIGHT_SERVICE_LABEL + Score : 100.00 + Match type : COPYRIGHT + Files with this license: + THIRD-PARTY-LICENSES.txt [2:3] + include/aws/common/external/ittnotify.h [2:2] + +KEEP COPYRIGHT_SERVICE_LABEL 9db87b867e659d4a2f36a8b3f8dfbe2e +BELONGS ya.make + License text: + Copyright (c) 2019 Intel Corporation. All rights reserved. + Scancode info: + Original SPDX id: COPYRIGHT_SERVICE_LABEL + Score : 100.00 + Match type : COPYRIGHT + Files with this license: + THIRD-PARTY-LICENSES.txt [8:8] + KEEP COPYRIGHT_SERVICE_LABEL bdcf211d81a69c0f282fb7543c1a24a7 BELONGS ya.make License text: @@ -200,5 +233,6 @@ BELONGS ya.make Score : 100.00 Match type : COPYRIGHT Files with this license: - include/aws/common/external/cJSON.h [2:2] + THIRD-PARTY-LICENSES.txt [38:38] source/external/cJSON.c [2:2] + source/external/cJSON.h [2:2] diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report index 04e0dee5a9d..26b42f6cff9 100644 --- a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report +++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report @@ -43,6 +43,18 @@ FILE_INCLUDE NOTICE found in files: LICENSE at line 107, LICENSE at line 110, LI Files with this license: LICENSE [2:202] +KEEP BSD-3-Clause 39d39c929b62f4e97932945b56b0cafa +BELONGS ya.make + License text: + Amazon.com has chosen to use this file under the terms of the BSD-3-Clause license. 
+ Scancode info: + Original SPDX id: BSD-3-Clause + Score : 100.00 + Match type : NOTICE + Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause + Files with this license: + include/aws/common/external/ittnotify.h [7:7] + KEEP Public-Domain 4f1fe8159202f044650a04f00bc590ca BELONGS ya.make License text: @@ -77,8 +89,7 @@ BELONGS ya.make Match type : TEXT Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT Files with this license: - include/aws/common/external/cJSON.h [4:20] - source/external/cJSON.c [4:20] + THIRD-PARTY-LICENSES.txt [40:56] KEEP Apache-2.0 6c901454b872854c0dea3ec06b67701a BELONGS ya.make @@ -117,6 +128,60 @@ BELONGS ya.make Files with this license: README.md [206:206] +KEEP BSD-3-Clause 8bad928e3b305eb76775886df88d05d2 +BELONGS ya.make + License text: + Amazon.com has chosen to use this file under the terms of the BSD-3-Clause + license. + Scancode info: + Original SPDX id: BSD-3-Clause + Score : 100.00 + Match type : NOTICE + Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause + Files with this license: + THIRD-PARTY-LICENSES.txt [5:6] + +KEEP GPL-2.0-only OR BSD-3-Clause 9eda2091acc554f0e0d6a80f13078e68 +BELONGS ya.make + License text: + SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause + Scancode info: + Original SPDX id: GPL-2.0-only + Score : 100.00 + Match type : TAG + Links : http://www.gnu.org/licenses/gpl-2.0.html, http://www.gnu.org/licenses/gpl-2.0.txt, https://spdx.org/licenses/GPL-2.0-only + Files with this license: + THIRD-PARTY-LICENSES.txt [3:3] + Scancode info: + Original SPDX id: BSD-3-Clause + Score : 100.00 + Match type : TAG + Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause + Files with this license: + THIRD-PARTY-LICENSES.txt [3:3] + +KEEP MIT b86f92e26fd701d7586fe0fb46cb52a5 +BELONGS ya.make + Note: matched license text is too long. 
Read it in the source files. + Scancode info: + Original SPDX id: MIT + Score : 100.00 + Match type : TEXT + Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT + Files with this license: + source/external/cJSON.c [4:20] + +KEEP MIT c9dbc809b4d1b19ffad9b334b14bd784 +BELONGS ya.make + Note: matched license text is too long. Read it in the source files. + Scancode info: + Original SPDX id: MIT + Score : 100.00 + Match type : TEXT + Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT + Files with this license: + source/external/cJSON.h [4:20] + KEEP Apache-2.0 d3a6c1cb5b7a1e53a61f819a19149ee8 BELONGS ya.make Note: matched license text is too long. Read it in the source files. @@ -126,7 +191,7 @@ BELONGS ya.make Match type : NOTICE Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0 Files with this license: - source/arch/arm/asm/cpuid.c [4:13] + source/arch/arm/auxv/cpuid.c [4:13] KEEP Apache-2.0 d591512e466bb957030b8857f753349e BELONGS ya.make @@ -159,6 +224,7 @@ BELONGS ya.make include/aws/common/common.h [6:6] include/aws/common/condition_variable.h [6:6] include/aws/common/cpuid.h [5:5] + include/aws/common/cross_process_lock.h [5:5] include/aws/common/date_time.h [5:5] include/aws/common/device_random.h [5:5] include/aws/common/encoding.h [6:6] @@ -170,6 +236,7 @@ BELONGS ya.make include/aws/common/fifo_cache.h [5:5] include/aws/common/file.h [5:5] include/aws/common/hash_table.h [6:6] + include/aws/common/host_utils.h [5:5] include/aws/common/json.h [6:6] include/aws/common/lifo_cache.h [5:5] include/aws/common/linked_hash_table.h [5:5] @@ -201,6 +268,7 @@ BELONGS ya.make include/aws/common/private/dlloads.h [5:5] include/aws/common/private/hash_table_impl.h [6:6] include/aws/common/private/json_impl.h [6:6] + include/aws/common/private/system_info_priv.h [5:5] include/aws/common/private/thread_shared.h [5:5] 
include/aws/common/private/xml_parser_impl.h [6:6] include/aws/common/process.h [5:5] @@ -215,6 +283,7 @@ BELONGS ya.make include/aws/common/string.h [5:5] include/aws/common/string.inl [5:5] include/aws/common/system_info.h [6:6] + include/aws/common/system_resource_util.h [6:6] include/aws/common/task_scheduler.h [6:6] include/aws/common/thread.h [6:6] include/aws/common/thread_scheduler.h [5:5] @@ -244,9 +313,11 @@ BELONGS ya.make source/fifo_cache.c [3:3] source/file.c [3:3] source/hash_table.c [3:3] + source/host_utils.c [3:3] source/json.c [3:3] source/lifo_cache.c [3:3] source/linked_hash_table.c [3:3] + source/linux/system_info.c [3:3] source/log_channel.c [3:3] source/log_formatter.c [3:3] source/log_writer.c [3:3] @@ -256,6 +327,7 @@ BELONGS ya.make source/memtrace.c [3:3] source/posix/clock.c [3:3] source/posix/condition_variable.c [3:3] + source/posix/cross_process_lock.c [3:3] source/posix/device_random.c [3:3] source/posix/environment.c [3:3] source/posix/file.c [3:3] @@ -263,6 +335,7 @@ BELONGS ya.make source/posix/process.c [3:3] source/posix/rw_lock.c [3:3] source/posix/system_info.c [3:3] + source/posix/system_resource_utils.c [3:3] source/posix/thread.c [3:3] source/posix/time.c [3:3] source/priority_queue.c [3:3] @@ -272,6 +345,7 @@ BELONGS ya.make source/ring_buffer.c [3:3] source/statistics.c [3:3] source/string.c [3:3] + source/system_info.c [3:3] source/task_scheduler.c [3:3] source/thread_scheduler.c [3:3] source/thread_shared.c [3:3] @@ -289,3 +363,33 @@ BELONGS ya.make Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause Files with this license: source/posix/time.c [17:41] + +KEEP BSD-3-Clause e60827a1ef7f95e14e36a67c7f48c0e8 +BELONGS ya.make + Note: matched license text is too long. Read it in the source files. 
+ Scancode info: + Original SPDX id: BSD-3-Clause + Score : 100.00 + Match type : TEXT + Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause + Files with this license: + THIRD-PARTY-LICENSES.txt [10:33] + +KEEP GPL-2.0-only OR BSD-3-Clause f80c0de9a57c672e7c2476761e9ffe3d +BELONGS ya.make + License text: + SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause + Scancode info: + Original SPDX id: GPL-2.0-only + Score : 100.00 + Match type : TAG + Links : http://www.gnu.org/licenses/gpl-2.0.html, http://www.gnu.org/licenses/gpl-2.0.txt, https://spdx.org/licenses/GPL-2.0-only + Files with this license: + include/aws/common/external/ittnotify.h [4:4] + Scancode info: + Original SPDX id: BSD-3-Clause + Score : 100.00 + Match type : TAG + Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause + Files with this license: + include/aws/common/external/ittnotify.h [4:4] diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt b/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt index a950dafebeb..c481a883b7f 100644 --- a/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt +++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt @@ -229,6 +229,10 @@ This library is licensed under the Apache 2.0 License. ====================BSD-3-Clause==================== + Amazon.com has chosen to use this file under the terms of the BSD-3-Clause license. + + +====================BSD-3-Clause==================== * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: @@ -256,6 +260,38 @@ This library is licensed under the Apache 2.0 License. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +====================BSD-3-Clause==================== +Amazon.com has chosen to use this file under the terms of the BSD-3-Clause +license. 
+ + +====================BSD-3-Clause==================== +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ====================COPYRIGHT==================== * Copyright 2014 The Chromium Authors. All rights reserved. @@ -265,14 +301,51 @@ This library is licensed under the Apache 2.0 License. 
====================COPYRIGHT==================== +Copyright (C) 2005-2019 Intel Corporation +SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause + + +====================COPYRIGHT==================== Copyright (c) 2009-2017 Dave Gamble and cJSON contributors +====================COPYRIGHT==================== +Copyright (c) 2019 Intel Corporation. All rights reserved. + + ====================File: NOTICE==================== AWS C Common Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +====================GPL-2.0-only OR BSD-3-Clause==================== + SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause + + +====================GPL-2.0-only OR BSD-3-Clause==================== +SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause + + +====================MIT==================== + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. 
+ + ====================MIT==================== Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/override.nix b/contrib/restricted/aws/aws-c-common/.yandex_meta/override.nix index fffe2453b53..52e866b49d8 100644 --- a/contrib/restricted/aws/aws-c-common/.yandex_meta/override.nix +++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/override.nix @@ -1,10 +1,10 @@ pkgs: attrs: with pkgs; with attrs; rec { - version = "0.8.23"; + version = "0.9.17"; src = fetchFromGitHub { owner = "awslabs"; repo = "aws-c-common"; rev = "v${version}"; - hash = "sha256-HkRaQnlasayg5Nu2KaEA18360rxAH/tdJ1iqzoi6i2E="; + hash = "sha256-Ee3wkgIOTsZn2PgHoaO5HqblXuOacuKm5vUwkl4Dg+4="; }; } diff --git a/contrib/restricted/aws/aws-c-common/README.md b/contrib/restricted/aws/aws-c-common/README.md index fcedee6076b..95ab315337a 100644 --- a/contrib/restricted/aws/aws-c-common/README.md +++ b/contrib/restricted/aws/aws-c-common/README.md @@ -42,7 +42,7 @@ Or on windows, * -DENABLE_SANITIZERS=ON - Enables gcc/clang sanitizers, by default this adds -fsanitizer=address,undefined to the compile flags for projects that call aws_add_sanitizers. * -DENABLE_FUZZ_TESTS=ON - Includes fuzz tests in the unit test suite. Off by default, because fuzz tests can take a long time. Set -DFUZZ_TESTS_MAX_TIME=N to determine how long to run each fuzz test (default 60s). * -DCMAKE_INSTALL_PREFIX=/path/to/install - Standard way of installing to a user defined path. If specified when configuring aws-c-common, ensure the same prefix is specified when configuring other aws-c-* SDKs. -* -DSTATIC_CRT=ON - On MSVC, use /MT(d) to link MSVCRT +* -DAWS_STATIC_MSVC_RUNTIME_LIBRARY=ON - Windows-only. Turn ON to use the statically-linked MSVC runtime lib, instead of the DLL. ### API style and conventions Every API has a specific set of styles and conventions. 
We'll outline them here. These conventions are followed in every @@ -121,7 +121,7 @@ have pre-slotted log subjects & error codes for each library. The currently allo | [0x3400, 0x3800) | aws-c-iot | | [0x3800, 0x3C00) | aws-c-s3 | | [0x3C00, 0x4000) | aws-c-sdkutils | -| [0x4000, 0x4400) | (reserved for future project) | +| [0x4000, 0x4400) | aws-crt-kotlin | | [0x4400, 0x4800) | (reserved for future project) | Each library should begin its error and log subject values at the beginning of its range and follow in sequence (don't skip codes). Upon @@ -158,7 +158,7 @@ Example: * Avoid C99 features in header files. For some types such as bool, uint32_t etc..., these are defined if not available for the language standard being used in `aws/common/common.h`, so feel free to use them. * For C++ compatibility, don't put const members in structs. -* Avoid C++ style comments e.g. `//`. +* Avoid C++ style comments e.g. `//` in header files and prefer block style (`/* */`) for long blocks of text. C++ style comments are fine in C files. * All public API functions need C++ guards and Windows dll semantics. * Use Unix line endings. * Where implementation hiding is desired for either ABI or runtime polymorphism reasons, use the `void *impl` pattern. v-tables @@ -197,7 +197,7 @@ Example: Not this: typedef int(*fn_name_fn)(void *); - + * If a callback may be async, then always have it be async. Callbacks that are sometimes async and sometimes sync are hard to code around and lead to bugs (see [this blog post](https://blog.ometer.com/2011/07/24/callbacks-synchronous-and-asynchronous/)). @@ -239,7 +239,7 @@ platform, you have more liberty on this. * When checking more than one error condition, check and log each condition separately with a unique message. 
Do this: - + if (options->callback == NULL) { AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - callback is null"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); @@ -251,7 +251,7 @@ platform, you have more liberty on this. } Not this: - + if (options->callback == NULL || options->allocator == NULL) { AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - something is null"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); diff --git a/contrib/restricted/aws/aws-c-common/THIRD-PARTY-LICENSES.txt b/contrib/restricted/aws/aws-c-common/THIRD-PARTY-LICENSES.txt new file mode 100644 index 00000000000..7ea3c219dc4 --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/THIRD-PARTY-LICENSES.txt @@ -0,0 +1,57 @@ +** ittapi ittnotify.h; version v3.24.2 -- https://github.com/intel/ittapi/blob/master/include/ittnotify.h +Copyright (C) 2005-2019 Intel Corporation +SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause + +Amazon.com has chosen to use this file under the terms of the BSD-3-Clause +license. + +Copyright (c) 2019 Intel Corporation. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------ + +** cJSON; version 1.7.16 -- https://github.com/DaveGamble/cJSON +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ diff --git a/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h b/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h index 69939cc4cbb..1bab20d34b3 100644 --- a/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h +++ b/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h @@ -19,5 +19,15 @@ /* #undef AWS_HAVE_EXECINFO */ /* #undef AWS_HAVE_WINAPI_DESKTOP */ #define AWS_HAVE_LINUX_IF_LINK_H +#define AWS_HAVE_AVX2_INTRINSICS +#define AWS_HAVE_AVX512_INTRINSICS +#define AWS_HAVE_MM256_EXTRACT_EPI64 +#define AWS_HAVE_CLMUL +/* #undef AWS_HAVE_ARM32_CRC */ +/* #undef AWS_HAVE_ARMv8_1 */ +/* #undef AWS_ARCH_ARM64 */ +#define AWS_ARCH_INTEL +#define AWS_ARCH_INTEL_X64 +#define AWS_USE_CPU_EXTENSIONS #endif diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h b/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h index a9ba14fdc1e..8d092055faa 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h @@ -12,6 +12,22 @@ AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN +/* + * Quick guide to allocators: + * CRT offers several flavours of allocators: + * - default: basic allocator that invokes system one directly. + * - aligned: basic allocator that aligns small allocations on 8 byte + * boundary and big buffers on 32/64 byte (system dependent) boundary. + * Aligned mem can improve perf on some operations, like memcpy or hashes. + * Depending on a system, can result in higher peak memory count in heavy + * acquire/free scenarios (ex. s3), due to memory fragmentation related to how + * aligned allocators work (over allocate, find aligned offset, release extra memory) + * - wrapped_cf: wraps MacOS's Security Framework allocator. 
+ * - mem_tracer: wraps any allocator and provides tracing functionality to allocations + * - small_block_allocator: pools smaller allocations into preallocated buckets. + * Not actively maintained. Avoid if possible. + */ + /* Allocator structure. An instance of this will be passed around for anything needing memory allocation */ struct aws_allocator { void *(*mem_acquire)(struct aws_allocator *allocator, size_t size); @@ -32,9 +48,16 @@ bool aws_allocator_is_valid(const struct aws_allocator *alloc); AWS_COMMON_API struct aws_allocator *aws_default_allocator(void); +/* + * Allocator that align small allocations on 8 byte boundary and big allocations + * on 32/64 byte boundary. + */ +AWS_COMMON_API +struct aws_allocator *aws_aligned_allocator(void); + #ifdef __MACH__ /* Avoid pulling in CoreFoundation headers in a header file. */ -struct __CFAllocator; +struct __CFAllocator; /* NOLINT(bugprone-reserved-identifier) */ typedef const struct __CFAllocator *CFAllocatorRef; /** diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h index ad7be100bba..4ef924456a4 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h @@ -12,7 +12,7 @@ AWS_PUSH_SANE_WARNING_LEVEL -#define AWS_ARRAY_LIST_DEBUG_FILL 0xDD +enum { AWS_ARRAY_LIST_DEBUG_FILL = 0xDD }; struct aws_array_list { struct aws_allocator *alloc; @@ -60,6 +60,21 @@ void aws_array_list_init_static( size_t item_size); /** + * Initializes an array list with a preallocated array of *already-initialized* elements. item_count is the number of + * elements in the array, and item_size is the size in bytes of each element. + * + * Once initialized, nothing further can be added to the list, since it will be full and cannot resize. + * + * Primary use case is to treat an already-initialized C array as an array list. 
+ */ +AWS_STATIC_IMPL +void aws_array_list_init_static_from_initialized( + struct aws_array_list *AWS_RESTRICT list, + void *raw_array, + size_t item_count, + size_t item_size); + +/** * Set of properties of a valid aws_array_list. */ AWS_STATIC_IMPL diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl index 9d3ceb458f5..42f447e92e0 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl @@ -82,6 +82,19 @@ void aws_array_list_init_static( } AWS_STATIC_IMPL +void aws_array_list_init_static_from_initialized( + struct aws_array_list *AWS_RESTRICT list, + void *raw_array, + size_t item_count, + size_t item_size) { + + aws_array_list_init_static(list, raw_array, item_count, item_size); + list->length = item_count; + + AWS_POSTCONDITION(aws_array_list_is_valid(list)); +} + +AWS_STATIC_IMPL bool aws_array_list_is_valid(const struct aws_array_list *AWS_RESTRICT list) { if (!list) { return false; diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/atomics_msvc.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/atomics_msvc.inl index 516385f2721..d1f3ba54c6b 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/atomics_msvc.inl +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/atomics_msvc.inl @@ -20,8 +20,8 @@ AWS_EXTERN_C_BEGIN -#if !(defined(_M_IX86) || defined(_M_X64)) -# error Atomics are not currently supported for non-x86 MSVC platforms +#if !(defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64)) +# error Atomics are not currently supported for non-x86 or ARM64 MSVC platforms /* * In particular, it's not clear that seq_cst will work properly on non-x86 @@ -63,6 +63,24 @@ AWS_EXTERN_C_BEGIN * this use case. 
*/ +/** + * Some general notes about ARM environments: + * ARM processors uses a weak memory model as opposed to the strong memory model used by Intel processors + * This means more permissible memory ordering allowed between stores and loads. + * + * Thus ARM port will need more hardware fences/barriers to assure developer intent. + * Memory barriers will prevent reordering stores and loads accross them depending on their type + * (read write, write only, read only ...) + * + * For more information about ARM64 memory ordering, + * see https://developer.arm.com/documentation/102336/0100/Memory-ordering + * For more information about Memory barriers, + * see https://developer.arm.com/documentation/102336/0100/Memory-barriers + * For more information about Miscosoft Interensic ARM64 APIs, + * see https://learn.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=msvc-170 + * Note: wrt _Interlocked[Op]64 is the same for ARM64 and x64 processors + */ + #ifdef _M_IX86 # define AWS_INTERLOCKED_INT(x) _Interlocked##x typedef long aws_atomic_impl_int_t; @@ -71,6 +89,32 @@ typedef long aws_atomic_impl_int_t; typedef long long aws_atomic_impl_int_t; #endif +#ifdef _M_ARM64 +/* Hardware Read Write barrier, prevents all memory operations to cross the barrier in both directions */ +# define AWS_RW_BARRIER() __dmb(_ARM64_BARRIER_SY) +/* Hardware Read barrier, prevents all memory operations to cross the barrier upwards */ +# define AWS_R_BARRIER() __dmb(_ARM64_BARRIER_LD) +/* Hardware Write barrier, prevents all memory operations to cross the barrier downwards */ +# define AWS_W_BARRIER() __dmb(_ARM64_BARRIER_ST) +/* Software barrier, prevents the compiler from reodering the operations across the barrier */ +# define AWS_SW_BARRIER() _ReadWriteBarrier(); +#else +/* hardware barriers, do nothing on x86 since it has a strong memory model + * as described in the section above: some general notes + */ +# define AWS_RW_BARRIER() +# define AWS_R_BARRIER() +# define AWS_W_BARRIER() 
+/* + * x86: only a compiler barrier is required. For seq_cst, we must use some form of interlocked operation for + * writes, but that's the caller's responsibility. + * + * Volatile ops may or may not imply this barrier, depending on the /volatile: switch, but adding an extra + * barrier doesn't hurt. + */ +# define AWS_SW_BARRIER() _ReadWriteBarrier(); /* software barrier */ +#endif + static inline void aws_atomic_priv_check_order(enum aws_memory_order order) { #ifndef NDEBUG switch (order) { @@ -107,14 +151,8 @@ static inline void aws_atomic_priv_barrier_before(enum aws_memory_order order, e return; } - /* - * x86: only a compiler barrier is required. For seq_cst, we must use some form of interlocked operation for - * writes, but that's the caller's responsibility. - * - * Volatile ops may or may not imply this barrier, depending on the /volatile: switch, but adding an extra - * barrier doesn't hurt. - */ - _ReadWriteBarrier(); + AWS_RW_BARRIER(); + AWS_SW_BARRIER(); } static inline void aws_atomic_priv_barrier_after(enum aws_memory_order order, enum aws_atomic_mode_priv mode) { @@ -131,11 +169,8 @@ static inline void aws_atomic_priv_barrier_after(enum aws_memory_order order, en return; } - /* - * x86: only a compiler barrier is required. For seq_cst, we must use some form of interlocked operation for - * writes, but that's the caller's responsibility. 
- */ - _ReadWriteBarrier(); + AWS_RW_BARRIER(); + AWS_SW_BARRIER(); } /** @@ -344,9 +379,16 @@ void aws_atomic_thread_fence(enum aws_memory_order order) { AWS_INTERLOCKED_INT(Exchange)(&x, 1); break; case aws_memory_order_release: + AWS_W_BARRIER(); + AWS_SW_BARRIER(); + break; case aws_memory_order_acquire: + AWS_R_BARRIER(); + AWS_SW_BARRIER(); + break; case aws_memory_order_acq_rel: - _ReadWriteBarrier(); + AWS_RW_BARRIER(); + AWS_SW_BARRIER(); break; case aws_memory_order_relaxed: /* no-op */ @@ -354,6 +396,12 @@ void aws_atomic_thread_fence(enum aws_memory_order order) { } } +/* prevent conflicts with other files that might pick the same names */ +#undef AWS_RW_BARRIER +#undef AWS_R_BARRIER +#undef AWS_W_BARRIER +#undef AWS_SW_BARRIER + #define AWS_ATOMICS_HAVE_THREAD_FENCE AWS_EXTERN_C_END #endif diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h index 17b0ae59b4a..6fc5c3ff906 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h @@ -136,6 +136,18 @@ AWS_COMMON_API int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename); /** + * Same as aws_byte_buf_init_from_file(), but for reading "special files" like /proc/cpuinfo. + * These files don't accurately report their size, so size_hint is used as initial buffer size, + * and the buffer grows until the while file is read. + */ +AWS_COMMON_API +int aws_byte_buf_init_from_file_with_size_hint( + struct aws_byte_buf *out_buf, + struct aws_allocator *alloc, + const char *filename, + size_t size_hint); + +/** * Evaluates the set of properties that define the shape of all valid aws_byte_buf structures. * It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion). 
*/ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/condition_variable.h b/contrib/restricted/aws/aws-c-common/include/aws/common/condition_variable.h index 317dedb9ce3..ba5984cb805 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/condition_variable.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/condition_variable.h @@ -33,7 +33,7 @@ struct aws_condition_variable { * You can do something like struct aws_condition_variable var = * AWS_CONDITION_VARIABLE_INIT; * - * If on Windows and you get an error about AWS_CONDITION_VARIABLE_INIT being undefined, please include Windows.h to get + * If on Windows and you get an error about AWS_CONDITION_VARIABLE_INIT being undefined, please include windows.h to get * CONDITION_VARIABLE_INIT. */ #ifdef _WIN32 diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h b/contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h index 6f9540d056c..9ab7d505931 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h @@ -14,8 +14,12 @@ enum aws_cpu_feature_name { AWS_CPU_FEATURE_SSE_4_1, AWS_CPU_FEATURE_SSE_4_2, AWS_CPU_FEATURE_AVX2, + AWS_CPU_FEATURE_AVX512, AWS_CPU_FEATURE_ARM_CRC, AWS_CPU_FEATURE_BMI2, + AWS_CPU_FEATURE_VPCLMULQDQ, + AWS_CPU_FEATURE_ARM_PMULL, + AWS_CPU_FEATURE_ARM_CRYPTO, AWS_CPU_FEATURE_COUNT, }; diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/cross_process_lock.h b/contrib/restricted/aws/aws-c-common/include/aws/common/cross_process_lock.h new file mode 100644 index 00000000000..99ecc5cb6b1 --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/cross_process_lock.h @@ -0,0 +1,35 @@ +#ifndef AWS_COMMON_CROSS_PROCESS_LOCK_H +#define AWS_COMMON_CROSS_PROCESS_LOCK_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/common/byte_buf.h> +#include <aws/common/common.h> + +struct aws_cross_process_lock; +AWS_EXTERN_C_BEGIN + +/** + * Attempts to acquire a system-wide (not per process or per user) lock scoped by instance_nonce. + * For any given unique nonce, a lock will be returned by the first caller. Subsequent calls will + * return NULL and raise AWS_ERROR_MUTEX_CALLER_NOT_OWNER + * until the either the process owning the lock exits or the program owning the lock + * calls aws_cross_process_lock_release() explicitly. + * + * If the process exits before the lock is released, the kernel will unlock it for the next consumer. + */ +AWS_COMMON_API +struct aws_cross_process_lock *aws_cross_process_lock_try_acquire( + struct aws_allocator *allocator, + struct aws_byte_cursor instance_nonce); + +/** + * Releases the lock so the next caller (may be another process) can get an instance of the lock. + */ +AWS_COMMON_API +void aws_cross_process_lock_release(struct aws_cross_process_lock *instance_lock); + +AWS_EXTERN_C_END + +#endif /* AWS_COMMON_CROSS_PROCESS_LOCK_H */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h b/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h index 1e38fa7d534..72ca09a86be 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h @@ -10,8 +10,10 @@ AWS_PUSH_SANE_WARNING_LEVEL -#define AWS_DATE_TIME_STR_MAX_LEN 100 -#define AWS_DATE_TIME_STR_MAX_BASIC_LEN 20 +enum { + AWS_DATE_TIME_STR_MAX_LEN = 100, + AWS_DATE_TIME_STR_MAX_BASIC_LEN = 20, +}; struct aws_byte_buf; struct aws_byte_cursor; @@ -78,11 +80,14 @@ AWS_COMMON_API void aws_date_time_init_epoch_secs(struct aws_date_time *dt, doub * Initializes dt to be the time represented by date_str in format 'fmt'. Returns AWS_OP_SUCCESS if the * string was successfully parsed, returns AWS_OP_ERR if parsing failed. 
* + * The parser is lenient regarding AWS_DATE_FORMAT_ISO_8601 vs AWS_DATE_FORMAT_ISO_8601_BASIC. + * Regardless of which you pass in, both "2002-10-02T08:05:09Z" and "20021002T080509Z" would be accepted. + * * Notes for AWS_DATE_FORMAT_RFC822: * If no time zone information is provided, it is assumed to be local time (please don't do this). * - * If the time zone is something other than something indicating Universal Time (e.g. Z, UT, UTC, or GMT) or an offset - * from UTC (e.g. +0100, -0700), parsing will fail. + * Only time zones indicating Universal Time (e.g. Z, UT, UTC, or GMT), + * or offsets from UTC (e.g. +0100, -0700), are accepted. * * Really, it's just better if you always use Universal Time. */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/error.h b/contrib/restricted/aws/aws-c-common/include/aws/common/error.h index 8fd0b436011..a40b193fa4a 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/error.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/error.h @@ -132,6 +132,14 @@ void aws_unregister_error_info(const struct aws_error_info_list *error_info); /** * Convert a c library io error into an aws error, and raise it. + * If no conversion is found, fallback_aws_error_code is raised. + * Always returns AWS_OP_ERR. + */ +AWS_COMMON_API +int aws_translate_and_raise_io_error_or(int error_no, int fallback_aws_error_code); + +/** + * Convert a c library io error into an aws error, and raise it. * If no conversion is found, AWS_ERROR_SYS_CALL_FAILURE is raised. * Always returns AWS_OP_ERR. 
*/ @@ -201,6 +209,10 @@ enum aws_common_error { AWS_ERROR_PLATFORM_NOT_SUPPORTED, AWS_ERROR_INVALID_UTF8, AWS_ERROR_GET_HOME_DIRECTORY_FAILED, + AWS_ERROR_INVALID_XML, + AWS_ERROR_FILE_OPEN_FAILURE, + AWS_ERROR_FILE_READ_FAILURE, + AWS_ERROR_FILE_WRITE_FAILURE, AWS_ERROR_END_COMMON_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_COMMON_PACKAGE_ID) }; diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/external/ittnotify.h b/contrib/restricted/aws/aws-c-common/include/aws/common/external/ittnotify.h new file mode 100644 index 00000000000..6a8cb76d81e --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/external/ittnotify.h @@ -0,0 +1,4668 @@ +/* + Copyright (C) 2005-2019 Intel Corporation + + SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause +*/ +/* + Amazon.com has chosen to use this file under the terms of the BSD-3-Clause license. +*/ +#ifndef _ITTNOTIFY_H_ +#define _ITTNOTIFY_H_ + +/** +@file +@brief Public User API functions and types +@mainpage + +The Instrumentation and Tracing Technology API (ITT API) is used to +annotate a user's program with additional information +that can be used by correctness and performance tools. The user inserts +calls in their program. Those calls generate information that is collected +at runtime, and used by Intel(R) Threading Tools. + +@section API Concepts +The following general concepts are used throughout the API. + +@subsection Unicode Support +Many API functions take character string arguments. On Windows, there +are two versions of each such function. The function name is suffixed +by W if Unicode support is enabled, and by A otherwise. Any API function +that takes a character string argument adheres to this convention. + +@subsection Conditional Compilation +Many users prefer having an option to modify ITT API code when linking it +inside their runtimes. ITT API header file provides a mechanism to replace +ITT API function names inside your code with empty strings. 
To do this, +define the macros INTEL_NO_ITTNOTIFY_API during compilation and remove the +static library from the linker script. + +@subsection Domains +[see domains] +Domains provide a way to separate notification for different modules or +libraries in a program. Domains are specified by dotted character strings, +e.g. TBB.Internal.Control. + +A mechanism (to be specified) is provided to enable and disable +domains. By default, all domains are enabled. +@subsection Named Entities and Instances +Named entities (frames, regions, tasks, and markers) communicate +information about the program to the analysis tools. A named entity often +refers to a section of program code, or to some set of logical concepts +that the programmer wants to group together. + +Named entities relate to the programmer's static view of the program. When +the program actually executes, many instances of a given named entity +may be created. + +The API annotations denote instances of named entities. The actual +named entities are displayed using the analysis tools. In other words, +the named entities come into existence when instances are created. + +Instances of named entities may have instance identifiers (IDs). Some +API calls use instance identifiers to create relationships between +different instances of named entities. Other API calls associate data +with instances of named entities. + +Some named entities must always have instance IDs. In particular, regions +and frames always have IDs. Task and markers need IDs only if the ID is +needed in another API call (such as adding a relation or metadata). + +The lifetime of instance IDs is distinct from the lifetime of +instances. This allows various relationships to be specified separate +from the actual execution of instances. This flexibility comes at the +expense of extra API calls. + +The same ID may not be reused for different instances, unless a previous +[ref] __itt_id_destroy call for that ID has been issued. 
+*/ + +/** @cond exclude_from_documentation */ +#ifndef ITT_OS_WIN +# define ITT_OS_WIN 1 +#endif /* ITT_OS_WIN */ + +#ifndef ITT_OS_LINUX +# define ITT_OS_LINUX 2 +#endif /* ITT_OS_LINUX */ + +#ifndef ITT_OS_MAC +# define ITT_OS_MAC 3 +#endif /* ITT_OS_MAC */ + +#ifndef ITT_OS_FREEBSD +# define ITT_OS_FREEBSD 4 +#endif /* ITT_OS_FREEBSD */ + +#ifndef ITT_OS_OPENBSD +# define ITT_OS_OPENBSD 5 +#endif /* ITT_OS_OPENBSD */ + +#ifndef ITT_OS +# if defined WIN32 || defined _WIN32 +# define ITT_OS ITT_OS_WIN +# elif defined( __APPLE__ ) && defined( __MACH__ ) +# define ITT_OS ITT_OS_MAC +# elif defined( __FreeBSD__ ) +# define ITT_OS ITT_OS_FREEBSD +# elif defined( __OpenBSD__) +# define ITT_OS ITT_OS_OPENBSD +# else +# define ITT_OS ITT_OS_LINUX +# endif +#endif /* ITT_OS */ + +#ifndef ITT_PLATFORM_WIN +# define ITT_PLATFORM_WIN 1 +#endif /* ITT_PLATFORM_WIN */ + +#ifndef ITT_PLATFORM_POSIX +# define ITT_PLATFORM_POSIX 2 +#endif /* ITT_PLATFORM_POSIX */ + +#ifndef ITT_PLATFORM_MAC +# define ITT_PLATFORM_MAC 3 +#endif /* ITT_PLATFORM_MAC */ + +#ifndef ITT_PLATFORM_FREEBSD +# define ITT_PLATFORM_FREEBSD 4 +#endif /* ITT_PLATFORM_FREEBSD */ + +#ifndef ITT_PLATFORM_OPENBSD +# define ITT_PLATFORM_OPENBSD 5 +#endif /* ITT_PLATFORM_OPENBSD */ + +#ifndef ITT_PLATFORM +# if ITT_OS==ITT_OS_WIN +# define ITT_PLATFORM ITT_PLATFORM_WIN +# elif ITT_OS==ITT_OS_MAC +# define ITT_PLATFORM ITT_PLATFORM_MAC +# elif ITT_OS==ITT_OS_FREEBSD +# define ITT_PLATFORM ITT_PLATFORM_FREEBSD +# elif ITT_OS==ITT_OS_OPENBSD +# define ITT_PLATFORM ITT_PLATFORM_OPENBSD +# else +# define ITT_PLATFORM ITT_PLATFORM_POSIX +# endif +#endif /* ITT_PLATFORM */ + +#if defined(_UNICODE) && !defined(UNICODE) +#define UNICODE +#endif + +#include <stddef.h> +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#include <tchar.h> +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#include <stdint.h> +#if defined(UNICODE) || defined(_UNICODE) +#include <wchar.h> +#endif /* UNICODE || _UNICODE */ +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN 
*/ + +#ifndef ITTAPI_CDECL +# if ITT_PLATFORM==ITT_PLATFORM_WIN +# define ITTAPI_CDECL __cdecl +# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# if defined _M_IX86 || defined __i386__ +# define ITTAPI_CDECL __attribute__ ((cdecl)) +# else /* _M_IX86 || __i386__ */ +# define ITTAPI_CDECL /* actual only on x86 platform */ +# endif /* _M_IX86 || __i386__ */ +# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* ITTAPI_CDECL */ + +#ifndef STDCALL +# if ITT_PLATFORM==ITT_PLATFORM_WIN +# define STDCALL __stdcall +# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# if defined _M_IX86 || defined __i386__ +# define STDCALL __attribute__ ((stdcall)) +# else /* _M_IX86 || __i386__ */ +# define STDCALL /* supported only on x86 platform */ +# endif /* _M_IX86 || __i386__ */ +# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* STDCALL */ + +#define ITTAPI ITTAPI_CDECL +#define LIBITTAPI ITTAPI_CDECL + +/* TODO: Temporary for compatibility! */ +#define ITTAPI_CALL ITTAPI_CDECL +#define LIBITTAPI_CALL ITTAPI_CDECL + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +/* use __forceinline (VC++ specific) */ +#if defined(__MINGW32__) && !defined(__cplusplus) +#define ITT_INLINE static __inline__ __attribute__((__always_inline__,__gnu_inline__)) +#else +#define ITT_INLINE static __forceinline +#endif /* __MINGW32__ */ + +#define ITT_INLINE_ATTRIBUTE /* nothing */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +/* + * Generally, functions are not inlined unless optimization is specified. + * For functions declared inline, this attribute inlines the function even + * if no optimization level was specified. 
+ */ +#ifdef __STRICT_ANSI__ +#define ITT_INLINE static +#define ITT_INLINE_ATTRIBUTE __attribute__((unused)) +#else /* __STRICT_ANSI__ */ +#define ITT_INLINE static inline +#define ITT_INLINE_ATTRIBUTE __attribute__((always_inline, unused)) +#endif /* __STRICT_ANSI__ */ +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +/** @endcond */ + +#ifdef INTEL_ITTNOTIFY_ENABLE_LEGACY +# if ITT_PLATFORM==ITT_PLATFORM_WIN +# pragma message("WARNING!!! Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro") +# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# warning "Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro" +# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# error #include "legacy/ittnotify.h" +#endif /* INTEL_ITTNOTIFY_ENABLE_LEGACY */ + +/** @cond exclude_from_documentation */ +/* Helper macro for joining tokens */ +#define ITT_JOIN_AUX(p,n) p##n +#define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) + +#ifdef ITT_MAJOR +#undef ITT_MAJOR +#endif +#ifdef ITT_MINOR +#undef ITT_MINOR +#endif +#define ITT_MAJOR 3 +#define ITT_MINOR 0 + +/* Standard versioning of a token with major and minor version numbers */ +#define ITT_VERSIONIZE(x) \ + ITT_JOIN(x, \ + ITT_JOIN(_, \ + ITT_JOIN(ITT_MAJOR, \ + ITT_JOIN(_, ITT_MINOR)))) + +#ifndef INTEL_ITTNOTIFY_PREFIX +# define INTEL_ITTNOTIFY_PREFIX __itt_ +#endif /* INTEL_ITTNOTIFY_PREFIX */ +#ifndef INTEL_ITTNOTIFY_POSTFIX +# define INTEL_ITTNOTIFY_POSTFIX _ptr_ +#endif /* INTEL_ITTNOTIFY_POSTFIX */ + +#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) +#define ITTNOTIFY_NAME(n) ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))) + +#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) +#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) + +#define ITTNOTIFY_VOID_D0(n,d) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) +#define ITTNOTIFY_VOID_D1(n,d,x) (d == NULL) ? 
(void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) +#define ITTNOTIFY_VOID_D2(n,d,x,y) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) +#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) +#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) +#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) +#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) +#define ITTNOTIFY_DATA_D0(n,d) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) +#define ITTNOTIFY_DATA_D1(n,d,x) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) +#define ITTNOTIFY_DATA_D2(n,d,x,y) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) +#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) +#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) +#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) +#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 
0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) + +#ifdef ITT_STUB +#undef ITT_STUB +#endif +#ifdef ITT_STUBV +#undef ITT_STUBV +#endif +#define ITT_STUBV(api,type,name,args) \ + typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ + extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); +#define ITT_STUB ITT_STUBV +/** @endcond */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** @cond exclude_from_gpa_documentation */ +/** + * @defgroup public Public API + * @{ + * @} + */ + +/** + * @defgroup control Collection Control + * @ingroup public + * General behavior: application continues to run, but no profiling information is being collected + * + * Pausing occurs not only for the current thread but for all process as well as spawned processes + * - Intel(R) Parallel Inspector and Intel(R) Inspector XE: + * - Does not analyze or report errors that involve memory access. + * - Other errors are reported as usual. Pausing data collection in + * Intel(R) Parallel Inspector and Intel(R) Inspector XE + * only pauses tracing and analyzing memory access. + * It does not pause tracing or analyzing threading APIs. + * . + * - Intel(R) Parallel Amplifier and Intel(R) VTune(TM) Amplifier XE: + * - Does continue to record when new threads are started. + * . + * - Other effects: + * - Possible reduction of runtime overhead. + * . 
+ * @{ + */ +/** @brief Pause collection */ +void ITTAPI __itt_pause(void); +/** @brief Resume collection */ +void ITTAPI __itt_resume(void); +/** @brief Detach collection */ +void ITTAPI __itt_detach(void); + +/** + * @enum __itt_collection_scope + * @brief Enumerator for collection scopes + */ +typedef enum { + __itt_collection_scope_host = 1 << 0, + __itt_collection_scope_offload = 1 << 1, + __itt_collection_scope_all = 0x7FFFFFFF +} __itt_collection_scope; + +/** @brief Pause scoped collection */ +void ITTAPI __itt_pause_scoped(__itt_collection_scope); +/** @brief Resume scoped collection */ +void ITTAPI __itt_resume_scoped(__itt_collection_scope); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, pause, (void)) +ITT_STUBV(ITTAPI, void, pause_scoped, (__itt_collection_scope)) +ITT_STUBV(ITTAPI, void, resume, (void)) +ITT_STUBV(ITTAPI, void, resume_scoped, (__itt_collection_scope)) +ITT_STUBV(ITTAPI, void, detach, (void)) +#define __itt_pause ITTNOTIFY_VOID(pause) +#define __itt_pause_ptr ITTNOTIFY_NAME(pause) +#define __itt_pause_scoped ITTNOTIFY_VOID(pause_scoped) +#define __itt_pause_scoped_ptr ITTNOTIFY_NAME(pause_scoped) +#define __itt_resume ITTNOTIFY_VOID(resume) +#define __itt_resume_ptr ITTNOTIFY_NAME(resume) +#define __itt_resume_scoped ITTNOTIFY_VOID(resume_scoped) +#define __itt_resume_scoped_ptr ITTNOTIFY_NAME(resume_scoped) +#define __itt_detach ITTNOTIFY_VOID(detach) +#define __itt_detach_ptr ITTNOTIFY_NAME(detach) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_pause() +#define __itt_pause_ptr 0 +#define __itt_pause_scoped(scope) +#define __itt_pause_scoped_ptr 0 +#define __itt_resume() +#define __itt_resume_ptr 0 +#define __itt_resume_scoped(scope) +#define __itt_resume_scoped_ptr 0 +#define __itt_detach() +#define __itt_detach_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_pause_ptr 0 +#define __itt_pause_scoped_ptr 0 
+#define __itt_resume_ptr 0 +#define __itt_resume_scoped_ptr 0 +#define __itt_detach_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} control group */ +/** @endcond */ + +/** + * @defgroup Intel Processor Trace control + * API from this group provides control over collection and analysis of Intel Processor Trace (Intel PT) data + * Information about Intel Processor Trace technology can be found here (Volume 3 chapter 35): + * https://software.intel.com/sites/default/files/managed/39/c5/325462-sdm-vol-1-2abcd-3abcd.pdf + * Use this API to mark particular code regions for loading detailed performance statistics. + * This mode makes your analysis faster and more accurate. + * @{ +*/ +typedef unsigned char __itt_pt_region; + +/** + * @brief function saves a region name marked with Intel PT API and returns a region id. + * Only 7 names can be registered. Attempts to register more names will be ignored and a region id with auto names will be returned. + * For automatic naming of regions pass NULL as function parameter +*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_pt_region ITTAPI __itt_pt_region_createA(const char *name); +__itt_pt_region ITTAPI __itt_pt_region_createW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_pt_region_create __itt_pt_region_createW +#else /* UNICODE */ +# define __itt_pt_region_create __itt_pt_region_createA +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_pt_region ITTAPI __itt_pt_region_create(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createA, (const char *name)) +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_create, (const char *name)) +#endif /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_pt_region_createA ITTNOTIFY_DATA(pt_region_createA) +#define __itt_pt_region_createA_ptr ITTNOTIFY_NAME(pt_region_createA) +#define __itt_pt_region_createW ITTNOTIFY_DATA(pt_region_createW) +#define __itt_pt_region_createW_ptr ITTNOTIFY_NAME(pt_region_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_pt_region_create ITTNOTIFY_DATA(pt_region_create) +#define __itt_pt_region_create_ptr ITTNOTIFY_NAME(pt_region_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_pt_region_createA(name) (__itt_pt_region)0 +#define __itt_pt_region_createA_ptr 0 +#define __itt_pt_region_createW(name) (__itt_pt_region)0 +#define __itt_pt_region_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_pt_region_create(name) (__itt_pt_region)0 +#define __itt_pt_region_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_pt_region_createA_ptr 0 +#define __itt_pt_region_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_pt_region_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief function contains a special code pattern identified on the post-processing stage and + * marks the beginning of a code region targeted for Intel PT analysis + * @param[in] region - region id, 0 <= region < 8 +*/ +void __itt_mark_pt_region_begin(__itt_pt_region region); +/** + * @brief function contains a special code pattern identified on the post-processing stage and + * marks the end of a code region targeted for Intel PT analysis + * @param[in] region - region id, 0 <= region < 8 +*/ +void __itt_mark_pt_region_end(__itt_pt_region region); +/** @} Intel PT control group*/ + +/** + * 
@defgroup threads Threads + * @ingroup public + * Give names to threads + * @{ + */ +/** + * @brief Sets thread name of calling thread + * @param[in] name - name of thread + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_thread_set_nameA(const char *name); +void ITTAPI __itt_thread_set_nameW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_thread_set_name __itt_thread_set_nameW +# define __itt_thread_set_name_ptr __itt_thread_set_nameW_ptr +#else /* UNICODE */ +# define __itt_thread_set_name __itt_thread_set_nameA +# define __itt_thread_set_name_ptr __itt_thread_set_nameA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_thread_set_name(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char *name)) +ITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, thread_set_name, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_thread_set_nameA ITTNOTIFY_VOID(thread_set_nameA) +#define __itt_thread_set_nameA_ptr ITTNOTIFY_NAME(thread_set_nameA) +#define __itt_thread_set_nameW ITTNOTIFY_VOID(thread_set_nameW) +#define __itt_thread_set_nameW_ptr ITTNOTIFY_NAME(thread_set_nameW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_thread_set_name ITTNOTIFY_VOID(thread_set_name) +#define __itt_thread_set_name_ptr ITTNOTIFY_NAME(thread_set_name) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_thread_set_nameA(name) +#define __itt_thread_set_nameA_ptr 0 +#define __itt_thread_set_nameW(name) +#define __itt_thread_set_nameW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN 
*/ +#define __itt_thread_set_name(name) +#define __itt_thread_set_name_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_thread_set_nameA_ptr 0 +#define __itt_thread_set_nameW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_thread_set_name_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @cond exclude_from_gpa_documentation */ + +/** + * @brief Mark current thread as ignored from this point on, for the duration of its existence. + */ +void ITTAPI __itt_thread_ignore(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, thread_ignore, (void)) +#define __itt_thread_ignore ITTNOTIFY_VOID(thread_ignore) +#define __itt_thread_ignore_ptr ITTNOTIFY_NAME(thread_ignore) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_thread_ignore() +#define __itt_thread_ignore_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_thread_ignore_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} threads group */ + +/** + * @defgroup suppress Error suppression + * @ingroup public + * General behavior: application continues to run, but errors are suppressed + * + * @{ + */ + +/*****************************************************************//** + * @name group of functions used for error suppression in correctness tools + *********************************************************************/ +/** @{ */ +/** + * @hideinitializer + * @brief possible value for suppression mask + */ +#define __itt_suppress_all_errors 0x7fffffff + +/** + * @hideinitializer + * @brief possible value for suppression mask (suppresses errors from threading analysis) + */ +#define __itt_suppress_threading_errors 0x000000ff + +/** + * @hideinitializer + * @brief possible value for 
suppression mask (suppresses errors from memory analysis) + */ +#define __itt_suppress_memory_errors 0x0000ff00 + +/** + * @brief Start suppressing errors identified in mask on this thread + */ +void ITTAPI __itt_suppress_push(unsigned int mask); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, suppress_push, (unsigned int mask)) +#define __itt_suppress_push ITTNOTIFY_VOID(suppress_push) +#define __itt_suppress_push_ptr ITTNOTIFY_NAME(suppress_push) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_suppress_push(mask) +#define __itt_suppress_push_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_suppress_push_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Undo the effects of the matching call to __itt_suppress_push + */ +void ITTAPI __itt_suppress_pop(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, suppress_pop, (void)) +#define __itt_suppress_pop ITTNOTIFY_VOID(suppress_pop) +#define __itt_suppress_pop_ptr ITTNOTIFY_NAME(suppress_pop) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_suppress_pop() +#define __itt_suppress_pop_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_suppress_pop_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @enum __itt_suppress_mode + * @brief Enumerator for the suppressing modes + */ +typedef enum __itt_suppress_mode { + __itt_unsuppress_range, + __itt_suppress_range +} __itt_suppress_mode_t; + +/** + * @enum __itt_collection_state + * @brief Enumerator for collection state. 
+ */ +typedef enum { + __itt_collection_uninitialized = 0, /* uninitialized */ + __itt_collection_init_fail = 1, /* failed to init */ + __itt_collection_collector_absent = 2, /* non work state collector is absent */ + __itt_collection_collector_exists = 3, /* work state collector exists */ + __itt_collection_init_successful = 4 /* success to init */ +} __itt_collection_state; + +/** + * @brief Mark a range of memory for error suppression or unsuppression for error types included in mask + */ +void ITTAPI __itt_suppress_mark_range(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, suppress_mark_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size)) +#define __itt_suppress_mark_range ITTNOTIFY_VOID(suppress_mark_range) +#define __itt_suppress_mark_range_ptr ITTNOTIFY_NAME(suppress_mark_range) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_suppress_mark_range(mask) +#define __itt_suppress_mark_range_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_suppress_mark_range_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Undo the effect of a matching call to __itt_suppress_mark_range. If not matching + * call is found, nothing is changed. 
+ */ +void ITTAPI __itt_suppress_clear_range(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, suppress_clear_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size)) +#define __itt_suppress_clear_range ITTNOTIFY_VOID(suppress_clear_range) +#define __itt_suppress_clear_range_ptr ITTNOTIFY_NAME(suppress_clear_range) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_suppress_clear_range(mask) +#define __itt_suppress_clear_range_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_suppress_clear_range_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} */ +/** @} suppress group */ + +/** + * @defgroup sync Synchronization + * @ingroup public + * Indicate user-written synchronization code + * @{ + */ +/** + * @hideinitializer + * @brief possible value of attribute argument for sync object type + */ +#define __itt_attr_barrier 1 + +/** + * @hideinitializer + * @brief possible value of attribute argument for sync object type + */ +#define __itt_attr_mutex 2 + +/** +@brief Name a synchronization object +@param[in] addr Handle for the synchronization object. You should +use a real address to uniquely identify the synchronization object. +@param[in] objtype null-terminated object type string. If NULL is +passed, the name will be "User Synchronization". +@param[in] objname null-terminated object name string. If NULL, +no name will be assigned to the object. 
+@param[in] attribute one of [#__itt_attr_barrier, #__itt_attr_mutex] + */ + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_sync_createA(void *addr, const char *objtype, const char *objname, int attribute); +void ITTAPI __itt_sync_createW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_sync_create __itt_sync_createW +# define __itt_sync_create_ptr __itt_sync_createW_ptr +#else /* UNICODE */ +# define __itt_sync_create __itt_sync_createA +# define __itt_sync_create_ptr __itt_sync_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_sync_create (void *addr, const char *objtype, const char *objname, int attribute); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char *objtype, const char *objname, int attribute)) +ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, sync_create, (void *addr, const char* objtype, const char* objname, int attribute)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_createA ITTNOTIFY_VOID(sync_createA) +#define __itt_sync_createA_ptr ITTNOTIFY_NAME(sync_createA) +#define __itt_sync_createW ITTNOTIFY_VOID(sync_createW) +#define __itt_sync_createW_ptr ITTNOTIFY_NAME(sync_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_create ITTNOTIFY_VOID(sync_create) +#define __itt_sync_create_ptr ITTNOTIFY_NAME(sync_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_createA(addr, objtype, objname, attribute) +#define 
__itt_sync_createA_ptr 0 +#define __itt_sync_createW(addr, objtype, objname, attribute) +#define __itt_sync_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_create(addr, objtype, objname, attribute) +#define __itt_sync_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_createA_ptr 0 +#define __itt_sync_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** +@brief Rename a synchronization object + +You can use the rename call to assign or reassign a name to a given +synchronization object. +@param[in] addr handle for the synchronization object. +@param[in] name null-terminated object name string. +*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_sync_renameA(void *addr, const char *name); +void ITTAPI __itt_sync_renameW(void *addr, const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_sync_rename __itt_sync_renameW +# define __itt_sync_rename_ptr __itt_sync_renameW_ptr +#else /* UNICODE */ +# define __itt_sync_rename __itt_sync_renameA +# define __itt_sync_rename_ptr __itt_sync_renameA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_sync_rename(void *addr, const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char *name)) +ITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, sync_rename, (void *addr, const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN 
+#define __itt_sync_renameA ITTNOTIFY_VOID(sync_renameA) +#define __itt_sync_renameA_ptr ITTNOTIFY_NAME(sync_renameA) +#define __itt_sync_renameW ITTNOTIFY_VOID(sync_renameW) +#define __itt_sync_renameW_ptr ITTNOTIFY_NAME(sync_renameW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_rename ITTNOTIFY_VOID(sync_rename) +#define __itt_sync_rename_ptr ITTNOTIFY_NAME(sync_rename) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_renameA(addr, name) +#define __itt_sync_renameA_ptr 0 +#define __itt_sync_renameW(addr, name) +#define __itt_sync_renameW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_rename(addr, name) +#define __itt_sync_rename_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_renameA_ptr 0 +#define __itt_sync_renameW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_rename_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + @brief Destroy a synchronization object. + @param addr Handle for the synchronization object. 
+ */ +void ITTAPI __itt_sync_destroy(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_destroy, (void *addr)) +#define __itt_sync_destroy ITTNOTIFY_VOID(sync_destroy) +#define __itt_sync_destroy_ptr ITTNOTIFY_NAME(sync_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_destroy(addr) +#define __itt_sync_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_destroy_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/*****************************************************************//** + * @name group of functions is used for performance measurement tools + *********************************************************************/ +/** @{ */ +/** + * @brief Enter spin loop on user-defined sync object + */ +void ITTAPI __itt_sync_prepare(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_prepare, (void *addr)) +#define __itt_sync_prepare ITTNOTIFY_VOID(sync_prepare) +#define __itt_sync_prepare_ptr ITTNOTIFY_NAME(sync_prepare) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_prepare(addr) +#define __itt_sync_prepare_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_prepare_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Quit spin loop without acquiring spin object + */ +void ITTAPI __itt_sync_cancel(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_cancel, (void *addr)) +#define __itt_sync_cancel ITTNOTIFY_VOID(sync_cancel) +#define __itt_sync_cancel_ptr ITTNOTIFY_NAME(sync_cancel) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_cancel(addr) +#define __itt_sync_cancel_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* 
INTEL_NO_MACRO_BODY */ +#define __itt_sync_cancel_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Successful spin loop completion (sync object acquired) + */ +void ITTAPI __itt_sync_acquired(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_acquired, (void *addr)) +#define __itt_sync_acquired ITTNOTIFY_VOID(sync_acquired) +#define __itt_sync_acquired_ptr ITTNOTIFY_NAME(sync_acquired) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_acquired(addr) +#define __itt_sync_acquired_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_acquired_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Start sync object releasing code. Is called before the lock release call. + */ +void ITTAPI __itt_sync_releasing(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_releasing, (void *addr)) +#define __itt_sync_releasing ITTNOTIFY_VOID(sync_releasing) +#define __itt_sync_releasing_ptr ITTNOTIFY_NAME(sync_releasing) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_releasing(addr) +#define __itt_sync_releasing_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_releasing_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} */ + +/** @} sync group */ + +/**************************************************************//** + * @name group of functions is used for correctness checking tools + ******************************************************************/ +/** @{ */ +/** + * @ingroup legacy + * @deprecated Legacy API + * @brief Fast synchronization which does no require spinning. 
+ * - This special function is to be used by TBB and OpenMP libraries only when they know + * there is no spin but they need to suppress TC warnings about shared variable modifications. + * - It only has corresponding pointers in static library and does not have corresponding function + * in dynamic library. + * @see void __itt_sync_prepare(void* addr); + */ +void ITTAPI __itt_fsync_prepare(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, fsync_prepare, (void *addr)) +#define __itt_fsync_prepare ITTNOTIFY_VOID(fsync_prepare) +#define __itt_fsync_prepare_ptr ITTNOTIFY_NAME(fsync_prepare) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_fsync_prepare(addr) +#define __itt_fsync_prepare_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_fsync_prepare_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup legacy + * @deprecated Legacy API + * @brief Fast synchronization which does no require spinning. + * - This special function is to be used by TBB and OpenMP libraries only when they know + * there is no spin but they need to suppress TC warnings about shared variable modifications. + * - It only has corresponding pointers in static library and does not have corresponding function + * in dynamic library. 
+ * @see void __itt_sync_cancel(void *addr); + */ +void ITTAPI __itt_fsync_cancel(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, fsync_cancel, (void *addr)) +#define __itt_fsync_cancel ITTNOTIFY_VOID(fsync_cancel) +#define __itt_fsync_cancel_ptr ITTNOTIFY_NAME(fsync_cancel) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_fsync_cancel(addr) +#define __itt_fsync_cancel_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_fsync_cancel_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup legacy + * @deprecated Legacy API + * @brief Fast synchronization which does no require spinning. + * - This special function is to be used by TBB and OpenMP libraries only when they know + * there is no spin but they need to suppress TC warnings about shared variable modifications. + * - It only has corresponding pointers in static library and does not have corresponding function + * in dynamic library. + * @see void __itt_sync_acquired(void *addr); + */ +void ITTAPI __itt_fsync_acquired(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, fsync_acquired, (void *addr)) +#define __itt_fsync_acquired ITTNOTIFY_VOID(fsync_acquired) +#define __itt_fsync_acquired_ptr ITTNOTIFY_NAME(fsync_acquired) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_fsync_acquired(addr) +#define __itt_fsync_acquired_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_fsync_acquired_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup legacy + * @deprecated Legacy API + * @brief Fast synchronization which does no require spinning. 
+ * - This special function is to be used by TBB and OpenMP libraries only when they know + * there is no spin but they need to suppress TC warnings about shared variable modifications. + * - It only has corresponding pointers in static library and does not have corresponding function + * in dynamic library. + * @see void __itt_sync_releasing(void* addr); + */ +void ITTAPI __itt_fsync_releasing(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, fsync_releasing, (void *addr)) +#define __itt_fsync_releasing ITTNOTIFY_VOID(fsync_releasing) +#define __itt_fsync_releasing_ptr ITTNOTIFY_NAME(fsync_releasing) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_fsync_releasing(addr) +#define __itt_fsync_releasing_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_fsync_releasing_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} */ + +/** + * @defgroup model Modeling by Intel(R) Parallel Advisor + * @ingroup public + * This is the subset of itt used for modeling by Intel(R) Parallel Advisor. + * This API is called ONLY using annotate.h, by "Annotation" macros + * the user places in their sources during the parallelism modeling steps. + * + * site_begin/end and task_begin/end take the address of handle variables, + * which are writeable by the API. Handles must be 0 initialized prior + * to the first call to begin, or may cause a run-time failure. + * The handles are initialized in a multi-thread safe way by the API if + * the handle is 0. The commonly expected idiom is one static handle to + * identify a site or task. If a site or task of the same name has already + * been started during this collection, the same handle MAY be returned, + * but is not required to be - it is unspecified if data merging is done + * based on name. These routines also take an instance variable. 
Like + * the lexical instance, these must be 0 initialized. Unlike the lexical + * instance, this is used to track a single dynamic instance. + * + * API used by the Intel(R) Parallel Advisor to describe potential concurrency + * and related activities. User-added source annotations expand to calls + * to these procedures to enable modeling of a hypothetical concurrent + * execution serially. + * @{ + */ +#if !defined(_ADVISOR_ANNOTATE_H_) || defined(ANNOTATE_EXPAND_NULL) + +typedef void* __itt_model_site; /*!< @brief handle for lexical site */ +typedef void* __itt_model_site_instance; /*!< @brief handle for dynamic instance */ +typedef void* __itt_model_task; /*!< @brief handle for lexical site */ +typedef void* __itt_model_task_instance; /*!< @brief handle for dynamic instance */ + +/** + * @enum __itt_model_disable + * @brief Enumerator for the disable methods + */ +typedef enum { + __itt_model_disable_observation, + __itt_model_disable_collection +} __itt_model_disable; + +#endif /* !_ADVISOR_ANNOTATE_H_ || ANNOTATE_EXPAND_NULL */ + +/** + * @brief ANNOTATE_SITE_BEGIN/ANNOTATE_SITE_END support. + * + * site_begin/end model a potential concurrency site. + * site instances may be recursively nested with themselves. + * site_end exits the most recently started but unended site for the current + * thread. The handle passed to end may be used to validate structure. + * Instances of a site encountered on different threads concurrently + * are considered completely distinct. If the site name for two different + * lexical sites match, it is unspecified whether they are treated as the + * same or different for data presentation. 
+ */ +void ITTAPI __itt_model_site_begin(__itt_model_site *site, __itt_model_site_instance *instance, const char *name); +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_model_site_beginW(const wchar_t *name); +#endif +void ITTAPI __itt_model_site_beginA(const char *name); +void ITTAPI __itt_model_site_beginAL(const char *name, size_t siteNameLen); +void ITTAPI __itt_model_site_end (__itt_model_site *site, __itt_model_site_instance *instance); +void ITTAPI __itt_model_site_end_2(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_site_begin, (__itt_model_site *site, __itt_model_site_instance *instance, const char *name)) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, model_site_beginW, (const wchar_t *name)) +#endif +ITT_STUBV(ITTAPI, void, model_site_beginA, (const char *name)) +ITT_STUBV(ITTAPI, void, model_site_beginAL, (const char *name, size_t siteNameLen)) +ITT_STUBV(ITTAPI, void, model_site_end, (__itt_model_site *site, __itt_model_site_instance *instance)) +ITT_STUBV(ITTAPI, void, model_site_end_2, (void)) +#define __itt_model_site_begin ITTNOTIFY_VOID(model_site_begin) +#define __itt_model_site_begin_ptr ITTNOTIFY_NAME(model_site_begin) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_site_beginW ITTNOTIFY_VOID(model_site_beginW) +#define __itt_model_site_beginW_ptr ITTNOTIFY_NAME(model_site_beginW) +#endif +#define __itt_model_site_beginA ITTNOTIFY_VOID(model_site_beginA) +#define __itt_model_site_beginA_ptr ITTNOTIFY_NAME(model_site_beginA) +#define __itt_model_site_beginAL ITTNOTIFY_VOID(model_site_beginAL) +#define __itt_model_site_beginAL_ptr ITTNOTIFY_NAME(model_site_beginAL) +#define __itt_model_site_end ITTNOTIFY_VOID(model_site_end) +#define __itt_model_site_end_ptr ITTNOTIFY_NAME(model_site_end) +#define __itt_model_site_end_2 ITTNOTIFY_VOID(model_site_end_2) +#define __itt_model_site_end_2_ptr ITTNOTIFY_NAME(model_site_end_2) 
+#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_site_begin(site, instance, name) +#define __itt_model_site_begin_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_site_beginW(name) +#define __itt_model_site_beginW_ptr 0 +#endif +#define __itt_model_site_beginA(name) +#define __itt_model_site_beginA_ptr 0 +#define __itt_model_site_beginAL(name, siteNameLen) +#define __itt_model_site_beginAL_ptr 0 +#define __itt_model_site_end(site, instance) +#define __itt_model_site_end_ptr 0 +#define __itt_model_site_end_2() +#define __itt_model_site_end_2_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_site_begin_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_site_beginW_ptr 0 +#endif +#define __itt_model_site_beginA_ptr 0 +#define __itt_model_site_beginAL_ptr 0 +#define __itt_model_site_end_ptr 0 +#define __itt_model_site_end_2_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_TASK_BEGIN/ANNOTATE_TASK_END support + * + * task_begin/end model a potential task, which is contained within the most + * closely enclosing dynamic site. task_end exits the most recently started + * but unended task. The handle passed to end may be used to validate + * structure. It is unspecified if bad dynamic nesting is detected. If it + * is, it should be encoded in the resulting data collection. The collector + * should not fail due to construct nesting issues, nor attempt to directly + * indicate the problem. 
+ */ +void ITTAPI __itt_model_task_begin(__itt_model_task *task, __itt_model_task_instance *instance, const char *name); +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_model_task_beginW(const wchar_t *name); +void ITTAPI __itt_model_iteration_taskW(const wchar_t *name); +#endif +void ITTAPI __itt_model_task_beginA(const char *name); +void ITTAPI __itt_model_task_beginAL(const char *name, size_t taskNameLen); +void ITTAPI __itt_model_iteration_taskA(const char *name); +void ITTAPI __itt_model_iteration_taskAL(const char *name, size_t taskNameLen); +void ITTAPI __itt_model_task_end (__itt_model_task *task, __itt_model_task_instance *instance); +void ITTAPI __itt_model_task_end_2(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_task_begin, (__itt_model_task *task, __itt_model_task_instance *instance, const char *name)) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, model_task_beginW, (const wchar_t *name)) +ITT_STUBV(ITTAPI, void, model_iteration_taskW, (const wchar_t *name)) +#endif +ITT_STUBV(ITTAPI, void, model_task_beginA, (const char *name)) +ITT_STUBV(ITTAPI, void, model_task_beginAL, (const char *name, size_t taskNameLen)) +ITT_STUBV(ITTAPI, void, model_iteration_taskA, (const char *name)) +ITT_STUBV(ITTAPI, void, model_iteration_taskAL, (const char *name, size_t taskNameLen)) +ITT_STUBV(ITTAPI, void, model_task_end, (__itt_model_task *task, __itt_model_task_instance *instance)) +ITT_STUBV(ITTAPI, void, model_task_end_2, (void)) +#define __itt_model_task_begin ITTNOTIFY_VOID(model_task_begin) +#define __itt_model_task_begin_ptr ITTNOTIFY_NAME(model_task_begin) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_task_beginW ITTNOTIFY_VOID(model_task_beginW) +#define __itt_model_task_beginW_ptr ITTNOTIFY_NAME(model_task_beginW) +#define __itt_model_iteration_taskW ITTNOTIFY_VOID(model_iteration_taskW) +#define __itt_model_iteration_taskW_ptr 
ITTNOTIFY_NAME(model_iteration_taskW) +#endif +#define __itt_model_task_beginA ITTNOTIFY_VOID(model_task_beginA) +#define __itt_model_task_beginA_ptr ITTNOTIFY_NAME(model_task_beginA) +#define __itt_model_task_beginAL ITTNOTIFY_VOID(model_task_beginAL) +#define __itt_model_task_beginAL_ptr ITTNOTIFY_NAME(model_task_beginAL) +#define __itt_model_iteration_taskA ITTNOTIFY_VOID(model_iteration_taskA) +#define __itt_model_iteration_taskA_ptr ITTNOTIFY_NAME(model_iteration_taskA) +#define __itt_model_iteration_taskAL ITTNOTIFY_VOID(model_iteration_taskAL) +#define __itt_model_iteration_taskAL_ptr ITTNOTIFY_NAME(model_iteration_taskAL) +#define __itt_model_task_end ITTNOTIFY_VOID(model_task_end) +#define __itt_model_task_end_ptr ITTNOTIFY_NAME(model_task_end) +#define __itt_model_task_end_2 ITTNOTIFY_VOID(model_task_end_2) +#define __itt_model_task_end_2_ptr ITTNOTIFY_NAME(model_task_end_2) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_task_begin(task, instance, name) +#define __itt_model_task_begin_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_task_beginW(name) +#define __itt_model_task_beginW_ptr 0 +#endif +#define __itt_model_task_beginA(name) +#define __itt_model_task_beginA_ptr 0 +#define __itt_model_task_beginAL(name, siteNameLen) +#define __itt_model_task_beginAL_ptr 0 +#define __itt_model_iteration_taskA(name) +#define __itt_model_iteration_taskA_ptr 0 +#define __itt_model_iteration_taskAL(name, siteNameLen) +#define __itt_model_iteration_taskAL_ptr 0 +#define __itt_model_task_end(task, instance) +#define __itt_model_task_end_ptr 0 +#define __itt_model_task_end_2() +#define __itt_model_task_end_2_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_task_begin_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_task_beginW_ptr 0 +#endif +#define __itt_model_task_beginA_ptr 0 +#define __itt_model_task_beginAL_ptr 0 +#define __itt_model_iteration_taskA_ptr 0 +#define 
__itt_model_iteration_taskAL_ptr 0 +#define __itt_model_task_end_ptr 0 +#define __itt_model_task_end_2_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_LOCK_ACQUIRE/ANNOTATE_LOCK_RELEASE support + * + * lock_acquire/release model a potential lock for both lockset and + * performance modeling. Each unique address is modeled as a separate + * lock, with invalid addresses being valid lock IDs. Specifically: + * no storage is accessed by the API at the specified address - it is only + * used for lock identification. Lock acquires may be self-nested and are + * unlocked by a corresponding number of releases. + * (These closely correspond to __itt_sync_acquired/__itt_sync_releasing, + * but may not have identical semantics.) + */ +void ITTAPI __itt_model_lock_acquire(void *lock); +void ITTAPI __itt_model_lock_acquire_2(void *lock); +void ITTAPI __itt_model_lock_release(void *lock); +void ITTAPI __itt_model_lock_release_2(void *lock); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_lock_acquire, (void *lock)) +ITT_STUBV(ITTAPI, void, model_lock_acquire_2, (void *lock)) +ITT_STUBV(ITTAPI, void, model_lock_release, (void *lock)) +ITT_STUBV(ITTAPI, void, model_lock_release_2, (void *lock)) +#define __itt_model_lock_acquire ITTNOTIFY_VOID(model_lock_acquire) +#define __itt_model_lock_acquire_ptr ITTNOTIFY_NAME(model_lock_acquire) +#define __itt_model_lock_acquire_2 ITTNOTIFY_VOID(model_lock_acquire_2) +#define __itt_model_lock_acquire_2_ptr ITTNOTIFY_NAME(model_lock_acquire_2) +#define __itt_model_lock_release ITTNOTIFY_VOID(model_lock_release) +#define __itt_model_lock_release_ptr ITTNOTIFY_NAME(model_lock_release) +#define __itt_model_lock_release_2 ITTNOTIFY_VOID(model_lock_release_2) +#define __itt_model_lock_release_2_ptr ITTNOTIFY_NAME(model_lock_release_2) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_lock_acquire(lock) +#define 
__itt_model_lock_acquire_ptr 0 +#define __itt_model_lock_acquire_2(lock) +#define __itt_model_lock_acquire_2_ptr 0 +#define __itt_model_lock_release(lock) +#define __itt_model_lock_release_ptr 0 +#define __itt_model_lock_release_2(lock) +#define __itt_model_lock_release_2_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_lock_acquire_ptr 0 +#define __itt_model_lock_acquire_2_ptr 0 +#define __itt_model_lock_release_ptr 0 +#define __itt_model_lock_release_2_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_RECORD_ALLOCATION/ANNOTATE_RECORD_DEALLOCATION support + * + * record_allocation/deallocation describe user-defined memory allocator + * behavior, which may be required for correctness modeling to understand + * when storage is not expected to be actually reused across threads. + */ +void ITTAPI __itt_model_record_allocation (void *addr, size_t size); +void ITTAPI __itt_model_record_deallocation(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_record_allocation, (void *addr, size_t size)) +ITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr)) +#define __itt_model_record_allocation ITTNOTIFY_VOID(model_record_allocation) +#define __itt_model_record_allocation_ptr ITTNOTIFY_NAME(model_record_allocation) +#define __itt_model_record_deallocation ITTNOTIFY_VOID(model_record_deallocation) +#define __itt_model_record_deallocation_ptr ITTNOTIFY_NAME(model_record_deallocation) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_record_allocation(addr, size) +#define __itt_model_record_allocation_ptr 0 +#define __itt_model_record_deallocation(addr) +#define __itt_model_record_deallocation_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_record_allocation_ptr 0 +#define __itt_model_record_deallocation_ptr 0 +#endif /* 
INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_INDUCTION_USES support + * + * Note particular storage is inductive through the end of the current site + */ +void ITTAPI __itt_model_induction_uses(void* addr, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_induction_uses, (void *addr, size_t size)) +#define __itt_model_induction_uses ITTNOTIFY_VOID(model_induction_uses) +#define __itt_model_induction_uses_ptr ITTNOTIFY_NAME(model_induction_uses) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_induction_uses(addr, size) +#define __itt_model_induction_uses_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_induction_uses_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_REDUCTION_USES support + * + * Note particular storage is used for reduction through the end + * of the current site + */ +void ITTAPI __itt_model_reduction_uses(void* addr, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_reduction_uses, (void *addr, size_t size)) +#define __itt_model_reduction_uses ITTNOTIFY_VOID(model_reduction_uses) +#define __itt_model_reduction_uses_ptr ITTNOTIFY_NAME(model_reduction_uses) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_reduction_uses(addr, size) +#define __itt_model_reduction_uses_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_reduction_uses_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_OBSERVE_USES support + * + * Have correctness modeling record observations about uses of storage + * through the end of the current site + */ +void ITTAPI __itt_model_observe_uses(void* addr, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY 
+#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_observe_uses, (void *addr, size_t size)) +#define __itt_model_observe_uses ITTNOTIFY_VOID(model_observe_uses) +#define __itt_model_observe_uses_ptr ITTNOTIFY_NAME(model_observe_uses) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_observe_uses(addr, size) +#define __itt_model_observe_uses_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_observe_uses_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_CLEAR_USES support + * + * Clear the special handling of a piece of storage related to induction, + * reduction or observe_uses + */ +void ITTAPI __itt_model_clear_uses(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_clear_uses, (void *addr)) +#define __itt_model_clear_uses ITTNOTIFY_VOID(model_clear_uses) +#define __itt_model_clear_uses_ptr ITTNOTIFY_NAME(model_clear_uses) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_clear_uses(addr) +#define __itt_model_clear_uses_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_clear_uses_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_DISABLE_*_PUSH/ANNOTATE_DISABLE_*_POP support + * + * disable_push/disable_pop push and pop disabling based on a parameter. + * Disabling observations stops processing of memory references during + * correctness modeling, and all annotations that occur in the disabled + * region. This allows description of code that is expected to be handled + * specially during conversion to parallelism or that is not recognized + * by tools (e.g. some kinds of synchronization operations.) + * This mechanism causes all annotations in the disabled region, other + * than disable_push and disable_pop, to be ignored. 
(For example, this + * might validly be used to disable an entire parallel site and the contained + * tasks and locking in it for data collection purposes.) + * The disable for collection is a more expensive operation, but reduces + * collector overhead significantly. This applies to BOTH correctness data + * collection and performance data collection. For example, a site + * containing a task might only enable data collection for the first 10 + * iterations. Both performance and correctness data should reflect this, + * and the program should run as close to full speed as possible when + * collection is disabled. + */ +void ITTAPI __itt_model_disable_push(__itt_model_disable x); +void ITTAPI __itt_model_disable_pop(void); +void ITTAPI __itt_model_aggregate_task(size_t x); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_disable_push, (__itt_model_disable x)) +ITT_STUBV(ITTAPI, void, model_disable_pop, (void)) +ITT_STUBV(ITTAPI, void, model_aggregate_task, (size_t x)) +#define __itt_model_disable_push ITTNOTIFY_VOID(model_disable_push) +#define __itt_model_disable_push_ptr ITTNOTIFY_NAME(model_disable_push) +#define __itt_model_disable_pop ITTNOTIFY_VOID(model_disable_pop) +#define __itt_model_disable_pop_ptr ITTNOTIFY_NAME(model_disable_pop) +#define __itt_model_aggregate_task ITTNOTIFY_VOID(model_aggregate_task) +#define __itt_model_aggregate_task_ptr ITTNOTIFY_NAME(model_aggregate_task) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_disable_push(x) +#define __itt_model_disable_push_ptr 0 +#define __itt_model_disable_pop() +#define __itt_model_disable_pop_ptr 0 +#define __itt_model_aggregate_task(x) +#define __itt_model_aggregate_task_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_disable_push_ptr 0 +#define __itt_model_disable_pop_ptr 0 +#define __itt_model_aggregate_task_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** 
@endcond */ +/** @} model group */ + +/** + * @defgroup heap Heap + * @ingroup public + * Heap group + * @{ + */ + +typedef void* __itt_heap_function; + +/** + * @brief Create an identification for heap function + * @return non-zero identifier or NULL + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_heap_function ITTAPI __itt_heap_function_createA(const char* name, const char* domain); +__itt_heap_function ITTAPI __itt_heap_function_createW(const wchar_t* name, const wchar_t* domain); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_heap_function_create __itt_heap_function_createW +# define __itt_heap_function_create_ptr __itt_heap_function_createW_ptr +#else +# define __itt_heap_function_create __itt_heap_function_createA +# define __itt_heap_function_create_ptr __itt_heap_function_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_heap_function ITTAPI __itt_heap_function_create(const char* name, const char* domain); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char* name, const char* domain)) +ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t* name, const wchar_t* domain)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_heap_function, heap_function_create, (const char* name, const char* domain)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_heap_function_createA ITTNOTIFY_DATA(heap_function_createA) +#define __itt_heap_function_createA_ptr ITTNOTIFY_NAME(heap_function_createA) +#define __itt_heap_function_createW ITTNOTIFY_DATA(heap_function_createW) +#define __itt_heap_function_createW_ptr ITTNOTIFY_NAME(heap_function_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_heap_function_create 
ITTNOTIFY_DATA(heap_function_create) +#define __itt_heap_function_create_ptr ITTNOTIFY_NAME(heap_function_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_heap_function_createA(name, domain) (__itt_heap_function)0 +#define __itt_heap_function_createA_ptr 0 +#define __itt_heap_function_createW(name, domain) (__itt_heap_function)0 +#define __itt_heap_function_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_heap_function_create(name, domain) (__itt_heap_function)0 +#define __itt_heap_function_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_heap_function_createA_ptr 0 +#define __itt_heap_function_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_heap_function_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an allocation begin occurrence. + */ +void ITTAPI __itt_heap_allocate_begin(__itt_heap_function h, size_t size, int initialized); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_allocate_begin, (__itt_heap_function h, size_t size, int initialized)) +#define __itt_heap_allocate_begin ITTNOTIFY_VOID(heap_allocate_begin) +#define __itt_heap_allocate_begin_ptr ITTNOTIFY_NAME(heap_allocate_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_allocate_begin(h, size, initialized) +#define __itt_heap_allocate_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_allocate_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an allocation end occurrence. 
+ */ +void ITTAPI __itt_heap_allocate_end(__itt_heap_function h, void** addr, size_t size, int initialized); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void** addr, size_t size, int initialized)) +#define __itt_heap_allocate_end ITTNOTIFY_VOID(heap_allocate_end) +#define __itt_heap_allocate_end_ptr ITTNOTIFY_NAME(heap_allocate_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_allocate_end(h, addr, size, initialized) +#define __itt_heap_allocate_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_allocate_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record a free begin occurrence. + */ +void ITTAPI __itt_heap_free_begin(__itt_heap_function h, void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr)) +#define __itt_heap_free_begin ITTNOTIFY_VOID(heap_free_begin) +#define __itt_heap_free_begin_ptr ITTNOTIFY_NAME(heap_free_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_free_begin(h, addr) +#define __itt_heap_free_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_free_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record a free end occurrence. 
+ */ +void ITTAPI __itt_heap_free_end(__itt_heap_function h, void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr)) +#define __itt_heap_free_end ITTNOTIFY_VOID(heap_free_end) +#define __itt_heap_free_end_ptr ITTNOTIFY_NAME(heap_free_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_free_end(h, addr) +#define __itt_heap_free_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_free_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record a reallocation begin occurrence. + */ +void ITTAPI __itt_heap_reallocate_begin(__itt_heap_function h, void* addr, size_t new_size, int initialized); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized)) +#define __itt_heap_reallocate_begin ITTNOTIFY_VOID(heap_reallocate_begin) +#define __itt_heap_reallocate_begin_ptr ITTNOTIFY_NAME(heap_reallocate_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_reallocate_begin(h, addr, new_size, initialized) +#define __itt_heap_reallocate_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_reallocate_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record a reallocation end occurrence. 
+ */ +void ITTAPI __itt_heap_reallocate_end(__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_reallocate_end, (__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized)) +#define __itt_heap_reallocate_end ITTNOTIFY_VOID(heap_reallocate_end) +#define __itt_heap_reallocate_end_ptr ITTNOTIFY_NAME(heap_reallocate_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_reallocate_end(h, addr, new_addr, new_size, initialized) +#define __itt_heap_reallocate_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_reallocate_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief internal access begin */ +void ITTAPI __itt_heap_internal_access_begin(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_internal_access_begin, (void)) +#define __itt_heap_internal_access_begin ITTNOTIFY_VOID(heap_internal_access_begin) +#define __itt_heap_internal_access_begin_ptr ITTNOTIFY_NAME(heap_internal_access_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_internal_access_begin() +#define __itt_heap_internal_access_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_internal_access_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief internal access end */ +void ITTAPI __itt_heap_internal_access_end(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_internal_access_end, (void)) +#define __itt_heap_internal_access_end ITTNOTIFY_VOID(heap_internal_access_end) +#define __itt_heap_internal_access_end_ptr ITTNOTIFY_NAME(heap_internal_access_end) +#else /* 
INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_internal_access_end() +#define __itt_heap_internal_access_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_internal_access_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief record memory growth begin */ +void ITTAPI __itt_heap_record_memory_growth_begin(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_record_memory_growth_begin, (void)) +#define __itt_heap_record_memory_growth_begin ITTNOTIFY_VOID(heap_record_memory_growth_begin) +#define __itt_heap_record_memory_growth_begin_ptr ITTNOTIFY_NAME(heap_record_memory_growth_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_record_memory_growth_begin() +#define __itt_heap_record_memory_growth_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_record_memory_growth_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief record memory growth end */ +void ITTAPI __itt_heap_record_memory_growth_end(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_record_memory_growth_end, (void)) +#define __itt_heap_record_memory_growth_end ITTNOTIFY_VOID(heap_record_memory_growth_end) +#define __itt_heap_record_memory_growth_end_ptr ITTNOTIFY_NAME(heap_record_memory_growth_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_record_memory_growth_end() +#define __itt_heap_record_memory_growth_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_record_memory_growth_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Specify the type of heap detection/reporting to modify. + */ +/** + * @hideinitializer + * @brief Report on memory leaks. 
+ */ +#define __itt_heap_leaks 0x00000001 + +/** + * @hideinitializer + * @brief Report on memory growth. + */ +#define __itt_heap_growth 0x00000002 + + +/** @brief heap reset detection */ +void ITTAPI __itt_heap_reset_detection(unsigned int reset_mask); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_reset_detection, (unsigned int reset_mask)) +#define __itt_heap_reset_detection ITTNOTIFY_VOID(heap_reset_detection) +#define __itt_heap_reset_detection_ptr ITTNOTIFY_NAME(heap_reset_detection) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_reset_detection() +#define __itt_heap_reset_detection_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_reset_detection_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief report */ +void ITTAPI __itt_heap_record(unsigned int record_mask); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_record, (unsigned int record_mask)) +#define __itt_heap_record ITTNOTIFY_VOID(heap_record) +#define __itt_heap_record_ptr ITTNOTIFY_NAME(heap_record) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_record() +#define __itt_heap_record_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_record_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} heap group */ +/** @endcond */ +/* ========================================================================== */ + +/** + * @defgroup domains Domains + * @ingroup public + * Domains group + * @{ + */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_domain +{ + volatile int flags; /*!< Zero if disabled, non-zero if enabled. The meaning of different non-zero values is reserved to the runtime */ + const char* nameA; /*!< Copy of original name in ASCII. 
*/ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* nameW; /*!< Copy of original name in UNICODE. */ +#else /* UNICODE || _UNICODE */ + void* nameW; +#endif /* UNICODE || _UNICODE */ + int extra1; /*!< Reserved to the runtime */ + void* extra2; /*!< Reserved to the runtime */ + struct ___itt_domain* next; +} __itt_domain; + +#pragma pack(pop) +/** @endcond */ + +/** + * @ingroup domains + * @brief Create a domain. + * Create domain using some domain name: the URI naming style is recommended. + * Because the set of domains is expected to be static over the application's + * execution time, there is no mechanism to destroy a domain. + * Any domain can be accessed by any thread in the process, regardless of + * which thread created the domain. This call is thread-safe. + * @param[in] name name of domain + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_domain* ITTAPI __itt_domain_createA(const char *name); +__itt_domain* ITTAPI __itt_domain_createW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_domain_create __itt_domain_createW +# define __itt_domain_create_ptr __itt_domain_createW_ptr +#else /* UNICODE */ +# define __itt_domain_create __itt_domain_createA +# define __itt_domain_create_ptr __itt_domain_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_domain* ITTAPI __itt_domain_create(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_domain*, domain_createA, (const char *name)) +ITT_STUB(ITTAPI, __itt_domain*, domain_createW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_domain*, domain_create, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_domain_createA ITTNOTIFY_DATA(domain_createA) +#define 
__itt_domain_createA_ptr ITTNOTIFY_NAME(domain_createA) +#define __itt_domain_createW ITTNOTIFY_DATA(domain_createW) +#define __itt_domain_createW_ptr ITTNOTIFY_NAME(domain_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_domain_create ITTNOTIFY_DATA(domain_create) +#define __itt_domain_create_ptr ITTNOTIFY_NAME(domain_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_domain_createA(name) (__itt_domain*)0 +#define __itt_domain_createA_ptr 0 +#define __itt_domain_createW(name) (__itt_domain*)0 +#define __itt_domain_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_domain_create(name) (__itt_domain*)0 +#define __itt_domain_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_domain_createA_ptr 0 +#define __itt_domain_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_domain_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} domains group */ + +/** + * @defgroup ids IDs + * @ingroup public + * IDs group + * @{ + */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_id +{ + unsigned long long d1, d2, d3; +} __itt_id; + +#pragma pack(pop) +/** @endcond */ + +static const __itt_id __itt_null = { 0, 0, 0 }; + +/** + * @ingroup ids + * @brief A convenience function is provided to create an ID without domain control. + * @brief This is a convenience function to initialize an __itt_id structure. This function + * does not affect the collector runtime in any way. After you make the ID with this + * function, you still must create it with the __itt_id_create function before using the ID + * to identify a named entity. + * @param[in] addr The address of object; high QWORD of the ID value. 
+ * @param[in] extra The extra data to unique identify object; low QWORD of the ID value. + */ + +ITT_INLINE __itt_id ITTAPI __itt_id_make(void* addr, unsigned long long extra) ITT_INLINE_ATTRIBUTE; +ITT_INLINE __itt_id ITTAPI __itt_id_make(void* addr, unsigned long long extra) +{ + __itt_id id = __itt_null; + id.d1 = (unsigned long long)((uintptr_t)addr); + id.d2 = (unsigned long long)extra; + id.d3 = (unsigned long long)0; /* Reserved. Must be zero */ + return id; +} + +/** + * @ingroup ids + * @brief Create an instance of identifier. + * This establishes the beginning of the lifetime of an instance of + * the given ID in the trace. Once this lifetime starts, the ID + * can be used to tag named entity instances in calls such as + * __itt_task_begin, and to specify relationships among + * identified named entity instances, using the \ref relations APIs. + * Instance IDs are not domain specific! + * @param[in] domain The domain controlling the execution of this call. + * @param[in] id The ID to create. + */ +void ITTAPI __itt_id_create(const __itt_domain *domain, __itt_id id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, id_create, (const __itt_domain *domain, __itt_id id)) +#define __itt_id_create(d,x) ITTNOTIFY_VOID_D1(id_create,d,x) +#define __itt_id_create_ptr ITTNOTIFY_NAME(id_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_id_create(domain,id) +#define __itt_id_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_id_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup ids + * @brief Destroy an instance of identifier. + * This ends the lifetime of the current instance of the given ID value in the trace. + * Any relationships that are established after this lifetime ends are invalid. 
+ * This call must be performed before the given ID value can be reused for a different + * named entity instance. + * @param[in] domain The domain controlling the execution of this call. + * @param[in] id The ID to destroy. + */ +void ITTAPI __itt_id_destroy(const __itt_domain *domain, __itt_id id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, id_destroy, (const __itt_domain *domain, __itt_id id)) +#define __itt_id_destroy(d,x) ITTNOTIFY_VOID_D1(id_destroy,d,x) +#define __itt_id_destroy_ptr ITTNOTIFY_NAME(id_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_id_destroy(domain,id) +#define __itt_id_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_id_destroy_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} ids group */ + +/** + * @defgroup handless String Handles + * @ingroup public + * String Handles group + * @{ + */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_string_handle +{ + const char* strA; /*!< Copy of original string in ASCII. */ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* strW; /*!< Copy of original string in UNICODE. */ +#else /* UNICODE || _UNICODE */ + void* strW; +#endif /* UNICODE || _UNICODE */ + int extra1; /*!< Reserved. Must be zero */ + void* extra2; /*!< Reserved. Must be zero */ + struct ___itt_string_handle* next; +} __itt_string_handle; + +#pragma pack(pop) +/** @endcond */ + +/** + * @ingroup handles + * @brief Create a string handle. + * Create and return handle value that can be associated with a string. + * Consecutive calls to __itt_string_handle_create with the same name + * return the same value. Because the set of string handles is expected to remain + * static during the application's execution time, there is no mechanism to destroy a string handle. 
+ * Any string handle can be accessed by any thread in the process, regardless of which thread created + * the string handle. This call is thread-safe. + * @param[in] name The input string + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_string_handle* ITTAPI __itt_string_handle_createA(const char *name); +__itt_string_handle* ITTAPI __itt_string_handle_createW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_string_handle_create __itt_string_handle_createW +# define __itt_string_handle_create_ptr __itt_string_handle_createW_ptr +#else /* UNICODE */ +# define __itt_string_handle_create __itt_string_handle_createA +# define __itt_string_handle_create_ptr __itt_string_handle_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_string_handle* ITTAPI __itt_string_handle_create(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createA, (const char *name)) +ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_create, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_string_handle_createA ITTNOTIFY_DATA(string_handle_createA) +#define __itt_string_handle_createA_ptr ITTNOTIFY_NAME(string_handle_createA) +#define __itt_string_handle_createW ITTNOTIFY_DATA(string_handle_createW) +#define __itt_string_handle_createW_ptr ITTNOTIFY_NAME(string_handle_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_string_handle_create ITTNOTIFY_DATA(string_handle_create) +#define __itt_string_handle_create_ptr ITTNOTIFY_NAME(string_handle_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* 
INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_string_handle_createA(name) (__itt_string_handle*)0 +#define __itt_string_handle_createA_ptr 0 +#define __itt_string_handle_createW(name) (__itt_string_handle*)0 +#define __itt_string_handle_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_string_handle_create(name) (__itt_string_handle*)0 +#define __itt_string_handle_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_string_handle_createA_ptr 0 +#define __itt_string_handle_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_string_handle_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} handles group */ + +/** @cond exclude_from_documentation */ +typedef unsigned long long __itt_timestamp; +/** @endcond */ + +#define __itt_timestamp_none ((__itt_timestamp)-1LL) + +/** @cond exclude_from_gpa_documentation */ + +/** + * @ingroup timestamps + * @brief Return timestamp corresponding to the current moment. + * This returns the timestamp in the format that is the most relevant for the current + * host or platform (RDTSC, QPC, and others). You can use the "<" operator to + * compare __itt_timestamp values. 
+ */ +__itt_timestamp ITTAPI __itt_get_timestamp(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_timestamp, get_timestamp, (void)) +#define __itt_get_timestamp ITTNOTIFY_DATA(get_timestamp) +#define __itt_get_timestamp_ptr ITTNOTIFY_NAME(get_timestamp) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_get_timestamp() +#define __itt_get_timestamp_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_get_timestamp_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} timestamps */ +/** @endcond */ + +/** @cond exclude_from_gpa_documentation */ + +/** + * @defgroup regions Regions + * @ingroup public + * Regions group + * @{ + */ +/** + * @ingroup regions + * @brief Begin of region instance. + * Successive calls to __itt_region_begin with the same ID are ignored + * until a call to __itt_region_end with the same ID + * @param[in] domain The domain for this region instance + * @param[in] id The instance ID for this region instance. Must not be __itt_null + * @param[in] parentid The instance ID for the parent of this region instance, or __itt_null + * @param[in] name The name of this region + */ +void ITTAPI __itt_region_begin(const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name); + +/** + * @ingroup regions + * @brief End of region instance. + * The first call to __itt_region_end with a given ID ends the + * region. Successive calls with the same ID are ignored, as are + * calls that do not have a matching __itt_region_begin call. 
+ * @param[in] domain The domain for this region instance + * @param[in] id The instance ID for this region instance + */ +void ITTAPI __itt_region_end(const __itt_domain *domain, __itt_id id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, region_begin, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, region_end, (const __itt_domain *domain, __itt_id id)) +#define __itt_region_begin(d,x,y,z) ITTNOTIFY_VOID_D3(region_begin,d,x,y,z) +#define __itt_region_begin_ptr ITTNOTIFY_NAME(region_begin) +#define __itt_region_end(d,x) ITTNOTIFY_VOID_D1(region_end,d,x) +#define __itt_region_end_ptr ITTNOTIFY_NAME(region_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_region_begin(d,x,y,z) +#define __itt_region_begin_ptr 0 +#define __itt_region_end(d,x) +#define __itt_region_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_region_begin_ptr 0 +#define __itt_region_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} regions group */ + +/** + * @defgroup frames Frames + * @ingroup public + * Frames are similar to regions, but are intended to be easier to use and to implement. + * In particular: + * - Frames always represent periods of elapsed time + * - By default, frames have no nesting relationships + * @{ + */ + +/** + * @ingroup frames + * @brief Begin a frame instance. + * Successive calls to __itt_frame_begin with the + * same ID are ignored until a call to __itt_frame_end with the same ID. + * @param[in] domain The domain for this frame instance + * @param[in] id The instance ID for this frame instance or NULL + */ +void ITTAPI __itt_frame_begin_v3(const __itt_domain *domain, __itt_id *id); + +/** + * @ingroup frames + * @brief End a frame instance. + * The first call to __itt_frame_end with a given ID + * ends the frame. 
Successive calls with the same ID are ignored, as are + * calls that do not have a matching __itt_frame_begin call. + * @param[in] domain The domain for this frame instance + * @param[in] id The instance ID for this frame instance or NULL for current + */ +void ITTAPI __itt_frame_end_v3(const __itt_domain *domain, __itt_id *id); + +/** + * @ingroup frames + * @brief Submits a frame instance. + * Successive calls to __itt_frame_begin or __itt_frame_submit with the + * same ID are ignored until a call to __itt_frame_end or __itt_frame_submit + * with the same ID. + * Passing special __itt_timestamp_none value as "end" argument means + * take the current timestamp as the end timestamp. + * @param[in] domain The domain for this frame instance + * @param[in] id The instance ID for this frame instance or NULL + * @param[in] begin Timestamp of the beginning of the frame + * @param[in] end Timestamp of the end of the frame + */ +void ITTAPI __itt_frame_submit_v3(const __itt_domain *domain, __itt_id *id, + __itt_timestamp begin, __itt_timestamp end); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, frame_begin_v3, (const __itt_domain *domain, __itt_id *id)) +ITT_STUBV(ITTAPI, void, frame_end_v3, (const __itt_domain *domain, __itt_id *id)) +ITT_STUBV(ITTAPI, void, frame_submit_v3, (const __itt_domain *domain, __itt_id *id, __itt_timestamp begin, __itt_timestamp end)) +#define __itt_frame_begin_v3(d,x) ITTNOTIFY_VOID_D1(frame_begin_v3,d,x) +#define __itt_frame_begin_v3_ptr ITTNOTIFY_NAME(frame_begin_v3) +#define __itt_frame_end_v3(d,x) ITTNOTIFY_VOID_D1(frame_end_v3,d,x) +#define __itt_frame_end_v3_ptr ITTNOTIFY_NAME(frame_end_v3) +#define __itt_frame_submit_v3(d,x,b,e) ITTNOTIFY_VOID_D3(frame_submit_v3,d,x,b,e) +#define __itt_frame_submit_v3_ptr ITTNOTIFY_NAME(frame_submit_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_frame_begin_v3(domain,id) +#define __itt_frame_begin_v3_ptr 0 +#define 
__itt_frame_end_v3(domain,id) +#define __itt_frame_end_v3_ptr 0 +#define __itt_frame_submit_v3(domain,id,begin,end) +#define __itt_frame_submit_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_frame_begin_v3_ptr 0 +#define __itt_frame_end_v3_ptr 0 +#define __itt_frame_submit_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} frames group */ +/** @endcond */ + +/** + * @defgroup taskgroup Task Group + * @ingroup public + * Task Group + * @{ + */ +/** + * @ingroup task_groups + * @brief Denotes a task_group instance. + * Successive calls to __itt_task_group with the same ID are ignored. + * @param[in] domain The domain for this task_group instance + * @param[in] id The instance ID for this task_group instance. Must not be __itt_null. + * @param[in] parentid The instance ID for the parent of this task_group instance, or __itt_null. + * @param[in] name The name of this task_group + */ +void ITTAPI __itt_task_group(const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, task_group, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name)) +#define __itt_task_group(d,x,y,z) ITTNOTIFY_VOID_D3(task_group,d,x,y,z) +#define __itt_task_group_ptr ITTNOTIFY_NAME(task_group) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_task_group(d,x,y,z) +#define __itt_task_group_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_task_group_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} taskgroup group */ + +/** + * @defgroup tasks Tasks + * @ingroup public + * A task instance represents a piece of work performed by a particular + * thread for a period of time. A call to __itt_task_begin creates a + * task instance. This becomes the current instance for that task on that + * thread. 
A following call to __itt_task_end on the same thread ends the + * instance. There may be multiple simultaneous instances of tasks with the + * same name on different threads. If an ID is specified, the task instance + * receives that ID. Nested tasks are allowed. + * + * Note: The task is defined by the bracketing of __itt_task_begin and + * __itt_task_end on the same thread. If some scheduling mechanism causes + * task switching (the thread executes a different user task) or task + * switching (the user task switches to a different thread) then this breaks + * the notion of current instance. Additional API calls are required to + * deal with that possibility. + * @{ + */ + +/** + * @ingroup tasks + * @brief Begin a task instance. + * @param[in] domain The domain for this task + * @param[in] taskid The instance ID for this task instance, or __itt_null + * @param[in] parentid The parent instance to which this task instance belongs, or __itt_null + * @param[in] name The name of this task + */ +void ITTAPI __itt_task_begin(const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name); + +/** + * @ingroup tasks + * @brief Begin a task instance. + * @param[in] domain The domain for this task + * @param[in] taskid The identifier for this task instance (may be 0) + * @param[in] parentid The parent of this task (may be 0) + * @param[in] fn The pointer to the function you are tracing + */ +void ITTAPI __itt_task_begin_fn(const __itt_domain *domain, __itt_id taskid, __itt_id parentid, void* fn); + +/** + * @ingroup tasks + * @brief End the current task instance. + * @param[in] domain The domain for this task + */ +void ITTAPI __itt_task_end(const __itt_domain *domain); + +/** + * @ingroup tasks + * @brief Begin an overlapped task instance. + * @param[in] domain The domain for this task. + * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null. + * @param[in] parentid The parent of this task, or __itt_null. 
+ * @param[in] name The name of this task. + */ +void ITTAPI __itt_task_begin_overlapped(const __itt_domain* domain, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); + +/** + * @ingroup tasks + * @brief End an overlapped task instance. + * @param[in] domain The domain for this task + * @param[in] taskid Explicit ID of finished task + */ +void ITTAPI __itt_task_end_overlapped(const __itt_domain *domain, __itt_id taskid); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, task_begin, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, task_begin_fn, (const __itt_domain *domain, __itt_id id, __itt_id parentid, void* fn)) +ITT_STUBV(ITTAPI, void, task_end, (const __itt_domain *domain)) +ITT_STUBV(ITTAPI, void, task_begin_overlapped, (const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, task_end_overlapped, (const __itt_domain *domain, __itt_id taskid)) +#define __itt_task_begin(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin,d,x,y,z) +#define __itt_task_begin_ptr ITTNOTIFY_NAME(task_begin) +#define __itt_task_begin_fn(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_fn,d,x,y,z) +#define __itt_task_begin_fn_ptr ITTNOTIFY_NAME(task_begin_fn) +#define __itt_task_end(d) ITTNOTIFY_VOID_D0(task_end,d) +#define __itt_task_end_ptr ITTNOTIFY_NAME(task_end) +#define __itt_task_begin_overlapped(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_overlapped,d,x,y,z) +#define __itt_task_begin_overlapped_ptr ITTNOTIFY_NAME(task_begin_overlapped) +#define __itt_task_end_overlapped(d,x) ITTNOTIFY_VOID_D1(task_end_overlapped,d,x) +#define __itt_task_end_overlapped_ptr ITTNOTIFY_NAME(task_end_overlapped) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_task_begin(domain,id,parentid,name) +#define __itt_task_begin_ptr 0 +#define __itt_task_begin_fn(domain,id,parentid,fn) +#define __itt_task_begin_fn_ptr 0 
+#define __itt_task_end(domain) +#define __itt_task_end_ptr 0 +#define __itt_task_begin_overlapped(domain,taskid,parentid,name) +#define __itt_task_begin_overlapped_ptr 0 +#define __itt_task_end_overlapped(domain,taskid) +#define __itt_task_end_overlapped_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_task_begin_ptr 0 +#define __itt_task_begin_fn_ptr 0 +#define __itt_task_end_ptr 0 +#define __itt_task_begin_overlapped_ptr 0 +#define __itt_task_end_overlapped_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} tasks group */ + + +/** + * @defgroup markers Markers + * Markers represent a single discreet event in time. Markers have a scope, + * described by an enumerated type __itt_scope. Markers are created by + * the API call __itt_marker. A marker instance can be given an ID for use in + * adding metadata. + * @{ + */ + +/** + * @brief Describes the scope of an event object in the trace. + */ +typedef enum +{ + __itt_scope_unknown = 0, + __itt_scope_global, + __itt_scope_track_group, + __itt_scope_track, + __itt_scope_task, + __itt_scope_marker +} __itt_scope; + +/** @cond exclude_from_documentation */ +#define __itt_marker_scope_unknown __itt_scope_unknown +#define __itt_marker_scope_global __itt_scope_global +#define __itt_marker_scope_process __itt_scope_track_group +#define __itt_marker_scope_thread __itt_scope_track +#define __itt_marker_scope_task __itt_scope_task +/** @endcond */ + +/** + * @ingroup markers + * @brief Create a marker instance + * @param[in] domain The domain for this marker + * @param[in] id The instance ID for this marker or __itt_null + * @param[in] name The name for this marker + * @param[in] scope The scope for this marker + */ +void ITTAPI __itt_marker(const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, marker, (const 
__itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope)) +#define __itt_marker(d,x,y,z) ITTNOTIFY_VOID_D3(marker,d,x,y,z) +#define __itt_marker_ptr ITTNOTIFY_NAME(marker) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_marker(domain,id,name,scope) +#define __itt_marker_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_marker_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} markers group */ + +/** + * @defgroup metadata Metadata + * The metadata API is used to attach extra information to named + * entities. Metadata can be attached to an identified named entity by ID, + * or to the current entity (which is always a task). + * + * Conceptually metadata has a type (what kind of metadata), a key (the + * name of the metadata), and a value (the actual data). The encoding of + * the value depends on the type of the metadata. + * + * The type of metadata is specified by an enumerated type __itt_metdata_type. + * @{ + */ + +/** + * @ingroup parameters + * @brief describes the type of metadata + */ +typedef enum { + __itt_metadata_unknown = 0, + __itt_metadata_u64, /**< Unsigned 64-bit integer */ + __itt_metadata_s64, /**< Signed 64-bit integer */ + __itt_metadata_u32, /**< Unsigned 32-bit integer */ + __itt_metadata_s32, /**< Signed 32-bit integer */ + __itt_metadata_u16, /**< Unsigned 16-bit integer */ + __itt_metadata_s16, /**< Signed 16-bit integer */ + __itt_metadata_float, /**< Signed 32-bit floating-point */ + __itt_metadata_double /**< SIgned 64-bit floating-point */ +} __itt_metadata_type; + +/** + * @ingroup parameters + * @brief Add metadata to an instance of a named entity. 
+ * @param[in] domain The domain controlling the call + * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task + * @param[in] key The name of the metadata + * @param[in] type The type of the metadata + * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added. + * @param[in] data The metadata itself +*/ +void ITTAPI __itt_metadata_add(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, metadata_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data)) +#define __itt_metadata_add(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(metadata_add,d,x,y,z,a,b) +#define __itt_metadata_add_ptr ITTNOTIFY_NAME(metadata_add) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_metadata_add(d,x,y,z,a,b) +#define __itt_metadata_add_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_metadata_add_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup parameters + * @brief Add string metadata to an instance of a named entity. 
+ * @param[in] domain The domain controlling the call + * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task + * @param[in] key The name of the metadata + * @param[in] data The metadata itself + * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated +*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_metadata_str_addA(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length); +void ITTAPI __itt_metadata_str_addW(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t *data, size_t length); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_metadata_str_add __itt_metadata_str_addW +# define __itt_metadata_str_add_ptr __itt_metadata_str_addW_ptr +#else /* UNICODE */ +# define __itt_metadata_str_add __itt_metadata_str_addA +# define __itt_metadata_str_add_ptr __itt_metadata_str_addA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_metadata_str_add(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length); +#endif + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, metadata_str_addA, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length)) +ITT_STUBV(ITTAPI, void, metadata_str_addW, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t *data, size_t length)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_metadata_str_addA(d,x,y,z,a) 
ITTNOTIFY_VOID_D4(metadata_str_addA,d,x,y,z,a) +#define __itt_metadata_str_addA_ptr ITTNOTIFY_NAME(metadata_str_addA) +#define __itt_metadata_str_addW(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_addW,d,x,y,z,a) +#define __itt_metadata_str_addW_ptr ITTNOTIFY_NAME(metadata_str_addW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_metadata_str_add(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add,d,x,y,z,a) +#define __itt_metadata_str_add_ptr ITTNOTIFY_NAME(metadata_str_add) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_metadata_str_addA(d,x,y,z,a) +#define __itt_metadata_str_addA_ptr 0 +#define __itt_metadata_str_addW(d,x,y,z,a) +#define __itt_metadata_str_addW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_metadata_str_add(d,x,y,z,a) +#define __itt_metadata_str_add_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_metadata_str_addA_ptr 0 +#define __itt_metadata_str_addW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_metadata_str_add_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup parameters + * @brief Add metadata to an instance of a named entity. + * @param[in] domain The domain controlling the call + * @param[in] scope The scope of the instance to which the metadata is to be added + + * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task + + * @param[in] key The name of the metadata + * @param[in] type The type of the metadata + * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added. 
+ * @param[in] data The metadata itself +*/ +void ITTAPI __itt_metadata_add_with_scope(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, metadata_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data)) +#define __itt_metadata_add_with_scope(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(metadata_add_with_scope,d,x,y,z,a,b) +#define __itt_metadata_add_with_scope_ptr ITTNOTIFY_NAME(metadata_add_with_scope) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_metadata_add_with_scope(d,x,y,z,a,b) +#define __itt_metadata_add_with_scope_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_metadata_add_with_scope_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup parameters + * @brief Add string metadata to an instance of a named entity. 
+ * @param[in] domain The domain controlling the call + * @param[in] scope The scope of the instance to which the metadata is to be added + + * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task + + * @param[in] key The name of the metadata + * @param[in] data The metadata itself + * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated +*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_metadata_str_add_with_scopeA(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length); +void ITTAPI __itt_metadata_str_add_with_scopeW(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_metadata_str_add_with_scope __itt_metadata_str_add_with_scopeW +# define __itt_metadata_str_add_with_scope_ptr __itt_metadata_str_add_with_scopeW_ptr +#else /* UNICODE */ +# define __itt_metadata_str_add_with_scope __itt_metadata_str_add_with_scopeA +# define __itt_metadata_str_add_with_scope_ptr __itt_metadata_str_add_with_scopeA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_metadata_str_add_with_scope(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length); +#endif + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeA, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length)) +ITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeW, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, 
metadata_str_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scopeA,d,x,y,z,a) +#define __itt_metadata_str_add_with_scopeA_ptr ITTNOTIFY_NAME(metadata_str_add_with_scopeA) +#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scopeW,d,x,y,z,a) +#define __itt_metadata_str_add_with_scopeW_ptr ITTNOTIFY_NAME(metadata_str_add_with_scopeW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_metadata_str_add_with_scope(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scope,d,x,y,z,a) +#define __itt_metadata_str_add_with_scope_ptr ITTNOTIFY_NAME(metadata_str_add_with_scope) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) +#define __itt_metadata_str_add_with_scopeA_ptr 0 +#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) +#define __itt_metadata_str_add_with_scopeW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_metadata_str_add_with_scope(d,x,y,z,a) +#define __itt_metadata_str_add_with_scope_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_metadata_str_add_with_scopeA_ptr 0 +#define __itt_metadata_str_add_with_scopeW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_metadata_str_add_with_scope_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} metadata group */ + +/** + * @defgroup relations Relations + * Instances of named entities can be explicitly associated with other + * instances using instance IDs and the relationship API calls. 
+ * + * @{ + */ + +/** + * @ingroup relations + * @brief The kind of relation between two instances is specified by the enumerated type __itt_relation. + * Relations between instances can be added with an API call. The relation + * API uses instance IDs. Relations can be added before or after the actual + * instances are created and persist independently of the instances. This + * is the motivation for having different lifetimes for instance IDs and + * the actual instances. + */ +typedef enum +{ + __itt_relation_is_unknown = 0, + __itt_relation_is_dependent_on, /**< "A is dependent on B" means that A cannot start until B completes */ + __itt_relation_is_sibling_of, /**< "A is sibling of B" means that A and B were created as a group */ + __itt_relation_is_parent_of, /**< "A is parent of B" means that A created B */ + __itt_relation_is_continuation_of, /**< "A is continuation of B" means that A assumes the dependencies of B */ + __itt_relation_is_child_of, /**< "A is child of B" means that A was created by B (inverse of is_parent_of) */ + __itt_relation_is_continued_by, /**< "A is continued by B" means that B assumes the dependencies of A (inverse of is_continuation_of) */ + __itt_relation_is_predecessor_to /**< "A is predecessor to B" means that B cannot start until A completes (inverse of is_dependent_on) */ +} __itt_relation; + +/** + * @ingroup relations + * @brief Add a relation to the current task instance. + * The current task instance is the head of the relation. + * @param[in] domain The domain controlling this call + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add_to_current(const __itt_domain *domain, __itt_relation relation, __itt_id tail); + +/** + * @ingroup relations + * @brief Add a relation between two instance identifiers. 
+ * @param[in] domain The domain controlling this call + * @param[in] head The ID for the head of the relation + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add(const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, relation_add_to_current, (const __itt_domain *domain, __itt_relation relation, __itt_id tail)) +ITT_STUBV(ITTAPI, void, relation_add, (const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail)) +#define __itt_relation_add_to_current(d,x,y) ITTNOTIFY_VOID_D2(relation_add_to_current,d,x,y) +#define __itt_relation_add_to_current_ptr ITTNOTIFY_NAME(relation_add_to_current) +#define __itt_relation_add(d,x,y,z) ITTNOTIFY_VOID_D3(relation_add,d,x,y,z) +#define __itt_relation_add_ptr ITTNOTIFY_NAME(relation_add) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_relation_add_to_current(d,x,y) +#define __itt_relation_add_to_current_ptr 0 +#define __itt_relation_add(d,x,y,z) +#define __itt_relation_add_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_relation_add_to_current_ptr 0 +#define __itt_relation_add_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} relations group */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_clock_info +{ + unsigned long long clock_freq; /*!< Clock domain frequency */ + unsigned long long clock_base; /*!< Clock domain base timestamp */ +} __itt_clock_info; + +#pragma pack(pop) +/** @endcond */ + +/** @cond exclude_from_documentation */ +typedef void (ITTAPI *__itt_get_clock_info_fn)(__itt_clock_info* clock_info, void* data); +/** @endcond */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_clock_domain +{ + __itt_clock_info 
info; /*!< Most recent clock domain info */ + __itt_get_clock_info_fn fn; /*!< Callback function pointer */ + void* fn_data; /*!< Input argument for the callback function */ + int extra1; /*!< Reserved. Must be zero */ + void* extra2; /*!< Reserved. Must be zero */ + struct ___itt_clock_domain* next; +} __itt_clock_domain; + +#pragma pack(pop) +/** @endcond */ + +/** + * @ingroup clockdomains + * @brief Create a clock domain. + * Certain applications require the capability to trace their application using + * a clock domain different than the CPU, for instance the instrumentation of events + * that occur on a GPU. + * Because the set of domains is expected to be static over the application's execution time, + * there is no mechanism to destroy a domain. + * Any domain can be accessed by any thread in the process, regardless of which thread created + * the domain. This call is thread-safe. + * @param[in] fn A pointer to a callback function which retrieves alternative CPU timestamps + * @param[in] fn_data Argument for a callback function; may be NULL + */ +__itt_clock_domain* ITTAPI __itt_clock_domain_create(__itt_get_clock_info_fn fn, void* fn_data); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_clock_domain*, clock_domain_create, (__itt_get_clock_info_fn fn, void* fn_data)) +#define __itt_clock_domain_create ITTNOTIFY_DATA(clock_domain_create) +#define __itt_clock_domain_create_ptr ITTNOTIFY_NAME(clock_domain_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_clock_domain_create(fn,fn_data) (__itt_clock_domain*)0 +#define __itt_clock_domain_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_clock_domain_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup clockdomains + * @brief Recalculate clock domains frequencies and clock base timestamps. 
+ */ +void ITTAPI __itt_clock_domain_reset(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, clock_domain_reset, (void)) +#define __itt_clock_domain_reset ITTNOTIFY_VOID(clock_domain_reset) +#define __itt_clock_domain_reset_ptr ITTNOTIFY_NAME(clock_domain_reset) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_clock_domain_reset() +#define __itt_clock_domain_reset_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_clock_domain_reset_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup clockdomain + * @brief Create an instance of identifier. This establishes the beginning of the lifetime of + * an instance of the given ID in the trace. Once this lifetime starts, the ID can be used to + * tag named entity instances in calls such as __itt_task_begin, and to specify relationships among + * identified named entity instances, using the \ref relations APIs. + * @param[in] domain The domain controlling the execution of this call. + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] id The ID to create. + */ +void ITTAPI __itt_id_create_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id); + +/** + * @ingroup clockdomain + * @brief Destroy an instance of identifier. This ends the lifetime of the current instance of the + * given ID value in the trace. Any relationships that are established after this lifetime ends are + * invalid. This call must be performed before the given ID value can be reused for a different + * named entity instance. + * @param[in] domain The domain controlling the execution of this call. + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
+ * @param[in] id The ID to destroy. + */ +void ITTAPI __itt_id_destroy_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, id_create_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id)) +ITT_STUBV(ITTAPI, void, id_destroy_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id)) +#define __itt_id_create_ex(d,x,y,z) ITTNOTIFY_VOID_D3(id_create_ex,d,x,y,z) +#define __itt_id_create_ex_ptr ITTNOTIFY_NAME(id_create_ex) +#define __itt_id_destroy_ex(d,x,y,z) ITTNOTIFY_VOID_D3(id_destroy_ex,d,x,y,z) +#define __itt_id_destroy_ex_ptr ITTNOTIFY_NAME(id_destroy_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_id_create_ex(domain,clock_domain,timestamp,id) +#define __itt_id_create_ex_ptr 0 +#define __itt_id_destroy_ex(domain,clock_domain,timestamp,id) +#define __itt_id_destroy_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_id_create_ex_ptr 0 +#define __itt_id_destroy_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup clockdomain + * @brief Begin a task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
+ * @param[in] taskid The instance ID for this task instance, or __itt_null + * @param[in] parentid The parent instance to which this task instance belongs, or __itt_null + * @param[in] name The name of this task + */ +void ITTAPI __itt_task_begin_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); + +/** + * @ingroup clockdomain + * @brief Begin a task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] taskid The identifier for this task instance, or __itt_null + * @param[in] parentid The parent of this task, or __itt_null + * @param[in] fn The pointer to the function you are tracing + */ +void ITTAPI __itt_task_begin_fn_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, void* fn); + +/** + * @ingroup clockdomain + * @brief End the current task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
+ */ +void ITTAPI __itt_task_end_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, task_begin_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, task_begin_fn_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, void* fn)) +ITT_STUBV(ITTAPI, void, task_end_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp)) +#define __itt_task_begin_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_ex,d,x,y,z,a,b) +#define __itt_task_begin_ex_ptr ITTNOTIFY_NAME(task_begin_ex) +#define __itt_task_begin_fn_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_fn_ex,d,x,y,z,a,b) +#define __itt_task_begin_fn_ex_ptr ITTNOTIFY_NAME(task_begin_fn_ex) +#define __itt_task_end_ex(d,x,y) ITTNOTIFY_VOID_D2(task_end_ex,d,x,y) +#define __itt_task_end_ex_ptr ITTNOTIFY_NAME(task_end_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_task_begin_ex(domain,clock_domain,timestamp,id,parentid,name) +#define __itt_task_begin_ex_ptr 0 +#define __itt_task_begin_fn_ex(domain,clock_domain,timestamp,id,parentid,fn) +#define __itt_task_begin_fn_ex_ptr 0 +#define __itt_task_end_ex(domain,clock_domain,timestamp) +#define __itt_task_end_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_task_begin_ex_ptr 0 +#define __itt_task_begin_fn_ex_ptr 0 +#define __itt_task_end_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @defgroup counters Counters + * @ingroup public + * Counters are user-defined objects with a monotonically increasing + * value. Counter values are 64-bit unsigned integers. 
+ * Counters have names that can be displayed in + * the tools. + * @{ + */ + +/** + * @brief opaque structure for counter identification + */ +/** @cond exclude_from_documentation */ + +typedef struct ___itt_counter* __itt_counter; + +/** + * @brief Create an unsigned 64 bits integer counter with given name/domain + * + * After __itt_counter_create() is called, __itt_counter_inc(id), __itt_counter_inc_delta(id, delta), + * __itt_counter_set_value(id, value_ptr) or __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) + * can be used to change the value of the counter, where value_ptr is a pointer to an unsigned 64 bits integer + * + * The call is equal to __itt_counter_create_typed(name, domain, __itt_metadata_u64) + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_counter ITTAPI __itt_counter_createA(const char *name, const char *domain); +__itt_counter ITTAPI __itt_counter_createW(const wchar_t *name, const wchar_t *domain); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_counter_create __itt_counter_createW +# define __itt_counter_create_ptr __itt_counter_createW_ptr +#else /* UNICODE */ +# define __itt_counter_create __itt_counter_createA +# define __itt_counter_create_ptr __itt_counter_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_counter ITTAPI __itt_counter_create(const char *name, const char *domain); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain)) +ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA ITTNOTIFY_DATA(counter_createA) 
+#define __itt_counter_createA_ptr ITTNOTIFY_NAME(counter_createA) +#define __itt_counter_createW ITTNOTIFY_DATA(counter_createW) +#define __itt_counter_createW_ptr ITTNOTIFY_NAME(counter_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create ITTNOTIFY_DATA(counter_create) +#define __itt_counter_create_ptr ITTNOTIFY_NAME(counter_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA(name, domain) +#define __itt_counter_createA_ptr 0 +#define __itt_counter_createW(name, domain) +#define __itt_counter_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create(name, domain) +#define __itt_counter_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA_ptr 0 +#define __itt_counter_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Increment the unsigned 64 bits integer counter value + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_inc(__itt_counter id); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id)) +#define __itt_counter_inc ITTNOTIFY_VOID(counter_inc) +#define __itt_counter_inc_ptr ITTNOTIFY_NAME(counter_inc) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_inc(id) +#define __itt_counter_inc_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_inc_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** + * @brief Increment the unsigned 64 bits integer counter value with x + * + * Calling this function to non-unsigned 64 
bits integer counters has no effect + */ +void ITTAPI __itt_counter_inc_delta(__itt_counter id, unsigned long long value); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value)) +#define __itt_counter_inc_delta ITTNOTIFY_VOID(counter_inc_delta) +#define __itt_counter_inc_delta_ptr ITTNOTIFY_NAME(counter_inc_delta) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_inc_delta(id, value) +#define __itt_counter_inc_delta_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_inc_delta_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Decrement the unsigned 64 bits integer counter value + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_dec(__itt_counter id); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec, (__itt_counter id)) +#define __itt_counter_dec ITTNOTIFY_VOID(counter_dec) +#define __itt_counter_dec_ptr ITTNOTIFY_NAME(counter_dec) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_dec(id) +#define __itt_counter_dec_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** + * @brief Decrement the unsigned 64 bits integer counter value with x + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_dec_delta(__itt_counter id, unsigned long long value); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec_delta, (__itt_counter id, unsigned long long value)) +#define __itt_counter_dec_delta ITTNOTIFY_VOID(counter_dec_delta) +#define __itt_counter_dec_delta_ptr ITTNOTIFY_NAME(counter_dec_delta) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define 
__itt_counter_dec_delta(id, value) +#define __itt_counter_dec_delta_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_delta_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup counters + * @brief Increment a counter by one. + * The first call with a given name creates a counter by that name and sets its + * value to zero. Successive calls increment the counter value. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. + * @param[in] name The name of the counter + */ +void ITTAPI __itt_counter_inc_v3(const __itt_domain *domain, __itt_string_handle *name); + +/** + * @ingroup counters + * @brief Increment a counter by the value specified in delta. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. 
+ * @param[in] name The name of the counter + * @param[in] delta The amount by which to increment the counter + */ +void ITTAPI __itt_counter_inc_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_inc_v3, (const __itt_domain *domain, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) +#define __itt_counter_inc_v3(d,x) ITTNOTIFY_VOID_D1(counter_inc_v3,d,x) +#define __itt_counter_inc_v3_ptr ITTNOTIFY_NAME(counter_inc_v3) +#define __itt_counter_inc_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_inc_delta_v3,d,x,y) +#define __itt_counter_inc_delta_v3_ptr ITTNOTIFY_NAME(counter_inc_delta_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_inc_v3(domain,name) +#define __itt_counter_inc_v3_ptr 0 +#define __itt_counter_inc_delta_v3(domain,name,delta) +#define __itt_counter_inc_delta_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_inc_v3_ptr 0 +#define __itt_counter_inc_delta_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + + +/** + * @ingroup counters + * @brief Decrement a counter by one. + * The first call with a given name creates a counter by that name and sets its + * value to zero. Successive calls decrement the counter value. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. + * @param[in] name The name of the counter + */ +void ITTAPI __itt_counter_dec_v3(const __itt_domain *domain, __itt_string_handle *name); + +/** + * @ingroup counters + * @brief Decrement a counter by the value specified in delta. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. 
+ * The domain argument is used only to enable or disable the API calls. + * @param[in] name The name of the counter + * @param[in] delta The amount by which to decrement the counter + */ +void ITTAPI __itt_counter_dec_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec_v3, (const __itt_domain *domain, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, counter_dec_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) +#define __itt_counter_dec_v3(d,x) ITTNOTIFY_VOID_D1(counter_dec_v3,d,x) +#define __itt_counter_dec_v3_ptr ITTNOTIFY_NAME(counter_dec_v3) +#define __itt_counter_dec_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_dec_delta_v3,d,x,y) +#define __itt_counter_dec_delta_v3_ptr ITTNOTIFY_NAME(counter_dec_delta_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_dec_v3(domain,name) +#define __itt_counter_dec_v3_ptr 0 +#define __itt_counter_dec_delta_v3(domain,name,delta) +#define __itt_counter_dec_delta_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_v3_ptr 0 +#define __itt_counter_dec_delta_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} counters group */ + + +/** + * @brief Set the counter value + */ +void ITTAPI __itt_counter_set_value(__itt_counter id, void *value_ptr); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_set_value, (__itt_counter id, void *value_ptr)) +#define __itt_counter_set_value ITTNOTIFY_VOID(counter_set_value) +#define __itt_counter_set_value_ptr ITTNOTIFY_NAME(counter_set_value) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_set_value(id, value_ptr) +#define __itt_counter_set_value_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_set_value_ptr 0 +#endif /* 
INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Set the counter value + */ +void ITTAPI __itt_counter_set_value_ex(__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_set_value_ex, (__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr)) +#define __itt_counter_set_value_ex ITTNOTIFY_VOID(counter_set_value_ex) +#define __itt_counter_set_value_ex_ptr ITTNOTIFY_NAME(counter_set_value_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) +#define __itt_counter_set_value_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_set_value_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Create a typed counter with given name/domain + * + * After __itt_counter_create_typed() is called, __itt_counter_inc(id), __itt_counter_inc_delta(id, delta), + * __itt_counter_set_value(id, value_ptr) or __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) + * can be used to change the value of the counter + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_counter ITTAPI __itt_counter_create_typedA(const char *name, const char *domain, __itt_metadata_type type); +__itt_counter ITTAPI __itt_counter_create_typedW(const wchar_t *name, const wchar_t *domain, __itt_metadata_type type); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_counter_create_typed __itt_counter_create_typedW +# define __itt_counter_create_typed_ptr __itt_counter_create_typedW_ptr +#else /* UNICODE */ +# define __itt_counter_create_typed __itt_counter_create_typedA +# define __itt_counter_create_typed_ptr __itt_counter_create_typedA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_counter ITTAPI 
__itt_counter_create_typed(const char *name, const char *domain, __itt_metadata_type type); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_create_typedA, (const char *name, const char *domain, __itt_metadata_type type)) +ITT_STUB(ITTAPI, __itt_counter, counter_create_typedW, (const wchar_t *name, const wchar_t *domain, __itt_metadata_type type)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create_typed, (const char *name, const char *domain, __itt_metadata_type type)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA ITTNOTIFY_DATA(counter_create_typedA) +#define __itt_counter_create_typedA_ptr ITTNOTIFY_NAME(counter_create_typedA) +#define __itt_counter_create_typedW ITTNOTIFY_DATA(counter_create_typedW) +#define __itt_counter_create_typedW_ptr ITTNOTIFY_NAME(counter_create_typedW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_typed ITTNOTIFY_DATA(counter_create_typed) +#define __itt_counter_create_typed_ptr ITTNOTIFY_NAME(counter_create_typed) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA(name, domain, type) +#define __itt_counter_create_typedA_ptr 0 +#define __itt_counter_create_typedW(name, domain, type) +#define __itt_counter_create_typedW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_typed(name, domain, type) +#define __itt_counter_create_typed_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA_ptr 0 +#define __itt_counter_create_typedW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define 
__itt_counter_create_typed_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Destroy the counter identified by the pointer previously returned by __itt_counter_create() or + * __itt_counter_create_typed() + */ +void ITTAPI __itt_counter_destroy(__itt_counter id); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id)) +#define __itt_counter_destroy ITTNOTIFY_VOID(counter_destroy) +#define __itt_counter_destroy_ptr ITTNOTIFY_NAME(counter_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_destroy(id) +#define __itt_counter_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_destroy_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} counters group */ + +/** + * @ingroup markers + * @brief Create a marker instance. + * @param[in] domain The domain for this marker + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
+ * @param[in] id The instance ID for this marker, or __itt_null + * @param[in] name The name for this marker + * @param[in] scope The scope for this marker + */ +void ITTAPI __itt_marker_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, marker_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope)) +#define __itt_marker_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(marker_ex,d,x,y,z,a,b) +#define __itt_marker_ex_ptr ITTNOTIFY_NAME(marker_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_marker_ex(domain,clock_domain,timestamp,id,name,scope) +#define __itt_marker_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_marker_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup clockdomain + * @brief Add a relation to the current task instance. + * The current task instance is the head of the relation. + * @param[in] domain The domain controlling this call + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add_to_current_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail); + +/** + * @ingroup clockdomain + * @brief Add a relation between two instance identifiers. + * @param[in] domain The domain controlling this call + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
+ * @param[in] head The ID for the head of the relation
+ * @param[in] relation The kind of relation
+ * @param[in] tail The ID for the tail of the relation
+ */
+void ITTAPI __itt_relation_add_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, relation_add_to_current_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail))
+ITT_STUBV(ITTAPI, void, relation_add_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail))
+#define __itt_relation_add_to_current_ex(d,x,y,z,a) ITTNOTIFY_VOID_D4(relation_add_to_current_ex,d,x,y,z,a)
+#define __itt_relation_add_to_current_ex_ptr ITTNOTIFY_NAME(relation_add_to_current_ex)
+#define __itt_relation_add_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(relation_add_ex,d,x,y,z,a,b)
+#define __itt_relation_add_ex_ptr ITTNOTIFY_NAME(relation_add_ex)
+#else /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_relation_add_to_current_ex(domain,clock_domain,timestamp,relation,tail)
+#define __itt_relation_add_to_current_ex_ptr 0
+#define __itt_relation_add_ex(domain,clock_domain,timestamp,head,relation,tail)
+#define __itt_relation_add_ex_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#define __itt_relation_add_to_current_ex_ptr 0
+#define __itt_relation_add_ex_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+typedef enum ___itt_track_group_type
+{
+    __itt_track_group_type_normal = 0
+} __itt_track_group_type;
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+#pragma pack(push, 8)
+
+typedef struct ___itt_track_group
+{
+    __itt_string_handle* name;      /*!< Name of the track
group */ + struct ___itt_track* track; /*!< List of child tracks */ + __itt_track_group_type tgtype; /*!< Type of the track group */ + int extra1; /*!< Reserved. Must be zero */ + void* extra2; /*!< Reserved. Must be zero */ + struct ___itt_track_group* next; +} __itt_track_group; + +#pragma pack(pop) +/** @endcond */ + +/** + * @brief Placeholder for custom track types. Currently, "normal" custom track + * is the only available track type. + */ +typedef enum ___itt_track_type +{ + __itt_track_type_normal = 0 +#ifdef INTEL_ITTNOTIFY_API_PRIVATE + , __itt_track_type_queue +#endif /* INTEL_ITTNOTIFY_API_PRIVATE */ +} __itt_track_type; + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_track +{ + __itt_string_handle* name; /*!< Name of the track group */ + __itt_track_group* group; /*!< Parent group to a track */ + __itt_track_type ttype; /*!< Type of the track */ + int extra1; /*!< Reserved. Must be zero */ + void* extra2; /*!< Reserved. Must be zero */ + struct ___itt_track* next; +} __itt_track; + +#pragma pack(pop) +/** @endcond */ + +/** + * @brief Create logical track group. + */ +__itt_track_group* ITTAPI __itt_track_group_create(__itt_string_handle* name, __itt_track_group_type track_group_type); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_track_group*, track_group_create, (__itt_string_handle* name, __itt_track_group_type track_group_type)) +#define __itt_track_group_create ITTNOTIFY_DATA(track_group_create) +#define __itt_track_group_create_ptr ITTNOTIFY_NAME(track_group_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_track_group_create(name) (__itt_track_group*)0 +#define __itt_track_group_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_track_group_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Create logical track. 
+ */ +__itt_track* ITTAPI __itt_track_create(__itt_track_group* track_group, __itt_string_handle* name, __itt_track_type track_type); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_track*, track_create, (__itt_track_group* track_group,__itt_string_handle* name, __itt_track_type track_type)) +#define __itt_track_create ITTNOTIFY_DATA(track_create) +#define __itt_track_create_ptr ITTNOTIFY_NAME(track_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_track_create(track_group,name,track_type) (__itt_track*)0 +#define __itt_track_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_track_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Set the logical track. + */ +void ITTAPI __itt_set_track(__itt_track* track); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, set_track, (__itt_track *track)) +#define __itt_set_track ITTNOTIFY_VOID(set_track) +#define __itt_set_track_ptr ITTNOTIFY_NAME(set_track) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_set_track(track) +#define __itt_set_track_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_set_track_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/* ========================================================================== */ +/** @cond exclude_from_gpa_documentation */ +/** + * @defgroup events Events + * @ingroup public + * Events group + * @{ + */ +/** @brief user event type */ +typedef int __itt_event; + +/** + * @brief Create an event notification + * @note name or namelen being null/name and namelen not matching, user event feature not enabled + * @return non-zero event identifier upon success and __itt_err otherwise + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_event LIBITTAPI __itt_event_createA(const char *name, int 
namelen); +__itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_event_create __itt_event_createW +# define __itt_event_create_ptr __itt_event_createW_ptr +#else +# define __itt_event_create __itt_event_createA +# define __itt_event_create_ptr __itt_event_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_event LIBITTAPI __itt_event_create(const char *name, int namelen); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char *name, int namelen)) +ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(LIBITTAPI, __itt_event, event_create, (const char *name, int namelen)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_event_createA ITTNOTIFY_DATA(event_createA) +#define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA) +#define __itt_event_createW ITTNOTIFY_DATA(event_createW) +#define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_event_create ITTNOTIFY_DATA(event_create) +#define __itt_event_create_ptr ITTNOTIFY_NAME(event_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_event_createA(name, namelen) (__itt_event)0 +#define __itt_event_createA_ptr 0 +#define __itt_event_createW(name, namelen) (__itt_event)0 +#define __itt_event_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_event_create(name, namelen) (__itt_event)0 +#define __itt_event_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* 
INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_event_createA_ptr 0 +#define __itt_event_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_event_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an event occurrence. + * @return __itt_err upon failure (invalid event id/user event feature not enabled) + */ +int LIBITTAPI __itt_event_start(__itt_event event); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event)) +#define __itt_event_start ITTNOTIFY_DATA(event_start) +#define __itt_event_start_ptr ITTNOTIFY_NAME(event_start) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_event_start(event) (int)0 +#define __itt_event_start_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_event_start_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an event end occurrence. + * @note It is optional if events do not have durations. 
+ * @return __itt_err upon failure (invalid event id/user event feature not enabled) + */ +int LIBITTAPI __itt_event_end(__itt_event event); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event)) +#define __itt_event_end ITTNOTIFY_DATA(event_end) +#define __itt_event_end_ptr ITTNOTIFY_NAME(event_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_event_end(event) (int)0 +#define __itt_event_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_event_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} events group */ + + +/** + * @defgroup arrays Arrays Visualizer + * @ingroup public + * Visualize arrays + * @{ + */ + +/** + * @enum __itt_av_data_type + * @brief Defines types of arrays data (for C/C++ intrinsic types) + */ +typedef enum +{ + __itt_e_first = 0, + __itt_e_char = 0, /* 1-byte integer */ + __itt_e_uchar, /* 1-byte unsigned integer */ + __itt_e_int16, /* 2-byte integer */ + __itt_e_uint16, /* 2-byte unsigned integer */ + __itt_e_int32, /* 4-byte integer */ + __itt_e_uint32, /* 4-byte unsigned integer */ + __itt_e_int64, /* 8-byte integer */ + __itt_e_uint64, /* 8-byte unsigned integer */ + __itt_e_float, /* 4-byte floating */ + __itt_e_double, /* 8-byte floating */ + __itt_e_last = __itt_e_double +} __itt_av_data_type; + +/** + * @brief Save an array data to a file. + * Output format is defined by the file extension. The csv and bmp formats are supported (bmp - for 2-dimensional array only). + * @param[in] data - pointer to the array data + * @param[in] rank - the rank of the array + * @param[in] dimensions - pointer to an array of integers, which specifies the array dimensions. 
+ * The size of dimensions must be equal to the rank + * @param[in] type - the type of the array, specified as one of the __itt_av_data_type values (for intrinsic types) + * @param[in] filePath - the file path; the output format is defined by the file extension + * @param[in] columnOrder - defines how the array is stored in the linear memory. + * It should be 1 for column-major order (e.g. in FORTRAN) or 0 - for row-major order (e.g. in C). + */ + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +int ITTAPI __itt_av_saveA(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder); +int ITTAPI __itt_av_saveW(void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_av_save __itt_av_saveW +# define __itt_av_save_ptr __itt_av_saveW_ptr +#else /* UNICODE */ +# define __itt_av_save __itt_av_saveA +# define __itt_av_save_ptr __itt_av_saveA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +int ITTAPI __itt_av_save(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, int, av_saveA, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder)) +ITT_STUB(ITTAPI, int, av_saveW, (void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, int, av_save, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA ITTNOTIFY_DATA(av_saveA) +#define __itt_av_saveA_ptr ITTNOTIFY_NAME(av_saveA) +#define __itt_av_saveW 
ITTNOTIFY_DATA(av_saveW) +#define __itt_av_saveW_ptr ITTNOTIFY_NAME(av_saveW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save ITTNOTIFY_DATA(av_save) +#define __itt_av_save_ptr ITTNOTIFY_NAME(av_save) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA(name) +#define __itt_av_saveA_ptr 0 +#define __itt_av_saveW(name) +#define __itt_av_saveW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save(name) +#define __itt_av_save_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA_ptr 0 +#define __itt_av_saveW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +void ITTAPI __itt_enable_attach(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, enable_attach, (void)) +#define __itt_enable_attach ITTNOTIFY_VOID(enable_attach) +#define __itt_enable_attach_ptr ITTNOTIFY_NAME(enable_attach) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_enable_attach() +#define __itt_enable_attach_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_enable_attach_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @cond exclude_from_gpa_documentation */ + +/** @} arrays group */ + +/** @endcond */ + +/** + * @brief Module load notification + * This API is used to report necessary information in case of bypassing default system loader. + * Notification should be done immidiatelly after this module is loaded to process memory. 
+ * @param[in] start_addr - module start address + * @param[in] end_addr - module end address + * @param[in] path - file system full path to the module + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_module_loadA(void *start_addr, void *end_addr, const char *path); +void ITTAPI __itt_module_loadW(void *start_addr, void *end_addr, const wchar_t *path); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_module_load __itt_module_loadW +# define __itt_module_load_ptr __itt_module_loadW_ptr +#else /* UNICODE */ +# define __itt_module_load __itt_module_loadA +# define __itt_module_load_ptr __itt_module_loadA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_module_load(void *start_addr, void *end_addr, const char *path); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, void, module_loadA, (void *start_addr, void *end_addr, const char *path)) +ITT_STUB(ITTAPI, void, module_loadW, (void *start_addr, void *end_addr, const wchar_t *path)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, void, module_load, (void *start_addr, void *end_addr, const char *path)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA ITTNOTIFY_VOID(module_loadA) +#define __itt_module_loadA_ptr ITTNOTIFY_NAME(module_loadA) +#define __itt_module_loadW ITTNOTIFY_VOID(module_loadW) +#define __itt_module_loadW_ptr ITTNOTIFY_NAME(module_loadW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load ITTNOTIFY_VOID(module_load) +#define __itt_module_load_ptr ITTNOTIFY_NAME(module_load) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA(start_addr, end_addr, path) +#define __itt_module_loadA_ptr 0 +#define 
__itt_module_loadW(start_addr, end_addr, path) +#define __itt_module_loadW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load(start_addr, end_addr, path) +#define __itt_module_load_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA_ptr 0 +#define __itt_module_loadW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Report module unload + * This API is used to report necessary information in case of bypassing default system loader. + * Notification should be done just before the module is unloaded from process memory. + * @param[in] addr - base address of loaded module + */ +void ITTAPI __itt_module_unload(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, module_unload, (void *addr)) +#define __itt_module_unload ITTNOTIFY_VOID(module_unload) +#define __itt_module_unload_ptr ITTNOTIFY_NAME(module_unload) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_module_unload(addr) +#define __itt_module_unload_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_module_unload_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @cond exclude_from_documentation */ +typedef enum +{ + __itt_module_type_unknown = 0, + __itt_module_type_elf, + __itt_module_type_coff +} __itt_module_type; +/** @endcond */ + +/** @cond exclude_from_documentation */ +typedef enum +{ + itt_section_type_unknown, + itt_section_type_bss, /* notifies that the section contains uninitialized data. 
These are the relevant section types and the modules that contain them: + * ELF module: SHT_NOBITS section type + * COFF module: IMAGE_SCN_CNT_UNINITIALIZED_DATA section type + */ + itt_section_type_data, /* notifies that section contains initialized data. These are the relevant section types and the modules that contain them: + * ELF module: SHT_PROGBITS section type + * COFF module: IMAGE_SCN_CNT_INITIALIZED_DATA section type + */ + itt_section_type_text /* notifies that the section contains executable code. These are the relevant section types and the modules that contain them: + * ELF module: SHT_PROGBITS section type + * COFF module: IMAGE_SCN_CNT_CODE section type + */ +} __itt_section_type; +/** @endcond */ + +/** + * @hideinitializer + * @brief bit-mask, detects a section attribute that indicates whether a section can be executed as code: + * These are the relevant section attributes and the modules that contain them: + * ELF module: PF_X section attribute + * COFF module: IMAGE_SCN_MEM_EXECUTE attribute + */ +#define __itt_section_exec 0x20000000 + +/** + * @hideinitializer + * @brief bit-mask, detects a section attribute that indicates whether a section can be read. + * These are the relevant section attributes and the modules that contain them: + * ELF module: PF_R attribute + * COFF module: IMAGE_SCN_MEM_READ attribute + */ +#define __itt_section_read 0x40000000 + +/** + * @hideinitializer + * @brief bit-mask, detects a section attribute that indicates whether a section can be written to. 
+ * These are the relevant section attributes and the modules that contain them: + * ELF module: PF_W attribute + * COFF module: IMAGE_SCN_MEM_WRITE attribute + */ +#define __itt_section_write 0x80000000 + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_section_info +{ + const char* name; /*!< Section name in UTF8 */ + __itt_section_type type; /*!< Section content and semantics description */ + size_t flags; /*!< Section bit flags that describe attributes using bit mask + * Zero if disabled, non-zero if enabled + */ + void* start_addr; /*!< Section load(relocated) start address */ + size_t size; /*!< Section file offset */ + size_t file_offset; /*!< Section size */ +} __itt_section_info; + +#pragma pack(pop) +/** @endcond */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_module_object +{ + unsigned int version; /*!< API version*/ + __itt_id module_id; /*!< Unique identifier. This is unchanged for sections that belong to the same module */ + __itt_module_type module_type; /*!< Binary module format */ + const char* module_name; /*!< Unique module name or path to module in UTF8 + * Contains module name when module_bufer and module_size exist + * Contains module path when module_bufer and module_size absent + * module_name remains the same for the certain module_id + */ + void* module_buffer; /*!< Module buffer content */ + size_t module_size; /*!< Module buffer size */ + /*!< If module_buffer and module_size exist, the binary module is dumped onto the system. + * If module_buffer and module_size do not exist, + * the binary module exists on the system already. + * The module_name parameter contains the path to the module. + */ + __itt_section_info* section_array; /*!< Reference to section information */ + size_t section_number; +} __itt_module_object; + +#pragma pack(pop) +/** @endcond */ + +/** + * @brief Load module content and its loaded(relocated) sections. 
+ * This API is useful to save a module, or specify its location on the system and report information about loaded sections. + * The target module is saved on the system if module buffer content and size are available. + * If module buffer content and size are unavailable, the module name contains the path to the existing binary module. + * @param[in] module_obj - provides module and section information, along with unique module identifiers (name,module ID) + * which bind the binary module to particular sections. + */ +void ITTAPI __itt_module_load_with_sections(__itt_module_object* module_obj); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, module_load_with_sections, (__itt_module_object* module_obj)) +#define __itt_module_load_with_sections ITTNOTIFY_VOID(module_load_with_sections) +#define __itt_module_load_with_sections_ptr ITTNOTIFY_NAME(module_load_with_sections) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_module_load_with_sections(module_obj) +#define __itt_module_load_with_sections_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_module_load_with_sections_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Unload a module and its loaded(relocated) sections. + * This API notifies that the module and its sections were unloaded. + * @param[in] module_obj - provides module and sections information, along with unique module identifiers (name,module ID) + * which bind the binary module to particular sections. 
+ */ +void ITTAPI __itt_module_unload_with_sections(__itt_module_object* module_obj); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, module_unload_with_sections, (__itt_module_object* module_obj)) +#define __itt_module_unload_with_sections ITTNOTIFY_VOID(module_unload_with_sections) +#define __itt_module_unload_with_sections_ptr ITTNOTIFY_NAME(module_unload_with_sections) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_module_unload_with_sections(module_obj) +#define __itt_module_unload_with_sections_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_module_unload_with_sections_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_histogram +{ + const __itt_domain* domain; /*!< Domain of the histogram*/ + const char* nameA; /*!< Name of the histogram */ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* nameW; +#else /* UNICODE || _UNICODE */ + void* nameW; +#endif /* UNICODE || _UNICODE */ + __itt_metadata_type x_type; /*!< Type of the histogram X axis */ + __itt_metadata_type y_type; /*!< Type of the histogram Y axis */ + int extra1; /*!< Reserved to the runtime */ + void* extra2; /*!< Reserved to the runtime */ + struct ___itt_histogram* next; +} __itt_histogram; + +#pragma pack(pop) +/** @endcond */ + +/** + * @brief Create a typed histogram instance with given name/domain. + * @param[in] domain The domain controlling the call. + * @param[in] name The name of the histogram. + * @param[in] x_type The type of the X axis in histogram (may be 0 to calculate batch statistics). + * @param[in] y_type The type of the Y axis in histogram. 
+*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_histogram* ITTAPI __itt_histogram_createA(const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type); +__itt_histogram* ITTAPI __itt_histogram_createW(const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_histogram_create __itt_histogram_createW +# define __itt_histogram_create_ptr __itt_histogram_createW_ptr +#else /* UNICODE */ +# define __itt_histogram_create __itt_histogram_createA +# define __itt_histogram_create_ptr __itt_histogram_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_histogram* ITTAPI __itt_histogram_create(const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_histogram*, histogram_createA, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) +ITT_STUB(ITTAPI, __itt_histogram*, histogram_createW, (const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_histogram*, histogram_create, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_histogram_createA ITTNOTIFY_DATA(histogram_createA) +#define __itt_histogram_createA_ptr ITTNOTIFY_NAME(histogram_createA) +#define __itt_histogram_createW ITTNOTIFY_DATA(histogram_createW) +#define __itt_histogram_createW_ptr ITTNOTIFY_NAME(histogram_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define 
__itt_histogram_create ITTNOTIFY_DATA(histogram_create) +#define __itt_histogram_create_ptr ITTNOTIFY_NAME(histogram_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_histogram_createA(domain, name, x_type, y_type) (__itt_histogram*)0 +#define __itt_histogram_createA_ptr 0 +#define __itt_histogram_createW(domain, name, x_type, y_type) (__itt_histogram*)0 +#define __itt_histogram_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_histogram_create(domain, name, x_type, y_type) (__itt_histogram*)0 +#define __itt_histogram_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_histogram_createA_ptr 0 +#define __itt_histogram_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_histogram_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Submit statistics for a histogram instance. + * @param[in] hist Pointer to the histogram instance to which the histogram statistic is to be dumped. + * @param[in] length The number of elements in dumped axis data array. + * @param[in] x_data The X axis dumped data itself (may be NULL to calculate batch statistics). + * @param[in] y_data The Y axis dumped data itself. 
+*/ +void ITTAPI __itt_histogram_submit(__itt_histogram* hist, size_t length, void* x_data, void* y_data); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, histogram_submit, (__itt_histogram* hist, size_t length, void* x_data, void* y_data)) +#define __itt_histogram_submit ITTNOTIFY_VOID(histogram_submit) +#define __itt_histogram_submit_ptr ITTNOTIFY_NAME(histogram_submit) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_histogram_submit(hist, length, x_data, y_data) +#define __itt_histogram_submit_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_histogram_submit_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ + +/** +* @brief function allows to obtain the current collection state at the moment +* @return collection state as a enum __itt_collection_state +*/ +__itt_collection_state __itt_get_collection_state(void); + +/** +* @brief function releases resources allocated by ITT API static part +* this API should be called from the library destructor +* @return void +*/ +void __itt_release_resources(void); +/** @endcond */ + +/** + * @brief Create a typed counter with given domain pointer, string name and counter type +*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_counter ITTAPI __itt_counter_createA_v3(const __itt_domain* domain, const char* name, __itt_metadata_type type); +__itt_counter ITTAPI __itt_counter_createW_v3(const __itt_domain* domain, const wchar_t* name, __itt_metadata_type type); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_counter_create_v3 __itt_counter_createW_v3 +# define __itt_counter_create_v3_ptr __itt_counter_createW_v3_ptr +#else /* UNICODE */ +# define __itt_counter_create_v3 __itt_counter_createA_v3 +# define __itt_counter_create_v3_ptr __itt_counter_createA_v3_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_counter ITTAPI __itt_counter_create_v3(const __itt_domain* domain, const char* 
name, __itt_metadata_type type); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_createA_v3, (const __itt_domain* domain, const char* name, __itt_metadata_type type)) +ITT_STUB(ITTAPI, __itt_counter, counter_createW_v3, (const __itt_domain* domain, const wchar_t* name, __itt_metadata_type type)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create_v3, (const __itt_domain* domain, const char* name, __itt_metadata_type type)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA_v3 ITTNOTIFY_DATA(counter_createA_v3) +#define __itt_counter_createA_v3_ptr ITTNOTIFY_NAME(counter_createA_v3) +#define __itt_counter_createW_v3 ITTNOTIFY_DATA(counter_createW_v3) +#define __itt_counter_createW_v3_ptr ITTNOTIFY_NAME(counter_createW_v3) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_v3 ITTNOTIFY_DATA(counter_create_v3) +#define __itt_counter_create_v3_ptr ITTNOTIFY_NAME(counter_create_v3) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA_v3(domain, name, type) (__itt_counter)0 +#define __itt_counter_createA_v3_ptr 0 +#define __itt_counter_createW_v3(domain, name, type) (__itt_counter)0 +#define __itt_counter_create_typedW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_v3(domain, name, type) (__itt_counter)0 +#define __itt_counter_create_v3_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA_v3_ptr 0 +#define __itt_counter_createW_v3_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_v3_ptr 0 +#endif /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Set the counter value api + */ +void ITTAPI __itt_counter_set_value_v3(__itt_counter counter, void *value_ptr); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_set_value_v3, (__itt_counter counter, void *value_ptr)) +#define __itt_counter_set_value_v3 ITTNOTIFY_VOID(counter_set_value_v3) +#define __itt_counter_set_value_v3_ptr ITTNOTIFY_NAME(counter_set_value_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_set_value_v3(counter, value_ptr) +#define __itt_counter_set_value_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_set_value_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief describes the type of context metadata +*/ +typedef enum { + __itt_context_unknown = 0, /*!< Undefined type */ + __itt_context_nameA, /*!< ASCII string char* type */ + __itt_context_nameW, /*!< Unicode string wchar_t* type */ + __itt_context_deviceA, /*!< ASCII string char* type */ + __itt_context_deviceW, /*!< Unicode string wchar_t* type */ + __itt_context_unitsA, /*!< ASCII string char* type */ + __itt_context_unitsW, /*!< Unicode string wchar_t* type */ + __itt_context_pci_addrA, /*!< ASCII string char* type */ + __itt_context_pci_addrW, /*!< Unicode string wchar_t* type */ + __itt_context_tid, /*!< Unsigned 64-bit integer type */ + __itt_context_max_val, /*!< Unsigned 64-bit integer type */ + __itt_context_bandwidth_flag, /*!< Unsigned 64-bit integer type */ + __itt_context_latency_flag, /*!< Unsigned 64-bit integer type */ + __itt_context_occupancy_flag, /*!< Unsigned 64-bit integer type */ + __itt_context_on_thread_flag, /*!< Unsigned 64-bit integer type */ + __itt_context_is_abs_val_flag, /*!< Unsigned 64-bit integer type */ + __itt_context_cpu_instructions_flag, /*!< Unsigned 64-bit integer type */ + __itt_context_cpu_cycles_flag /*!< 
Unsigned 64-bit integer type */ +} __itt_context_type; + +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_context_name __itt_context_nameW +# define __itt_context_device __itt_context_deviceW +# define __itt_context_units __itt_context_unitsW +# define __itt_context_pci_addr __itt_context_pci_addrW +#else /* UNICODE || _UNICODE */ +# define __itt_context_name __itt_context_nameA +# define __itt_context_device __itt_context_deviceA +# define __itt_context_units __itt_context_unitsA +# define __itt_context_pci_addr __itt_context_pci_addrA +#endif /* UNICODE || _UNICODE */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_context_metadata +{ + __itt_context_type type; /*!< Type of the context metadata value */ + void* value; /*!< Pointer to context metadata value itself */ +} __itt_context_metadata; + +#pragma pack(pop) +/** @endcond */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_counter_metadata +{ + __itt_counter counter; /*!< Associated context metadata counter */ + __itt_context_type type; /*!< Type of the context metadata value */ + const char* str_valueA; /*!< String context metadata value */ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* str_valueW; +#else /* UNICODE || _UNICODE */ + void* str_valueW; +#endif /* UNICODE || _UNICODE */ + unsigned long long value; /*!< Numeric context metadata value */ + int extra1; /*!< Reserved to the runtime */ + void* extra2; /*!< Reserved to the runtime */ + struct ___itt_counter_metadata* next; +} __itt_counter_metadata; + +#pragma pack(pop) +/** @endcond */ + +/** + * @brief Bind context metadata to counter instance + * @param[in] counter Pointer to the counter instance to which the context metadata is to be associated. + * @param[in] length The number of elements in context metadata array. + * @param[in] metadata The context metadata itself. 
+*/ +void ITTAPI __itt_bind_context_metadata_to_counter(__itt_counter counter, size_t length, __itt_context_metadata* metadata); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, bind_context_metadata_to_counter, (__itt_counter counter, size_t length, __itt_context_metadata* metadata)) +#define __itt_bind_context_metadata_to_counter ITTNOTIFY_VOID(bind_context_metadata_to_counter) +#define __itt_bind_context_metadata_to_counter_ptr ITTNOTIFY_NAME(bind_context_metadata_to_counter) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_bind_context_metadata_to_counter(counter, length, metadata) +#define __itt_bind_context_metadata_to_counter_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_bind_context_metadata_to_counter_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _ITTNOTIFY_H_ */ + +#ifdef INTEL_ITTNOTIFY_API_PRIVATE + +#ifndef _ITTNOTIFY_PRIVATE_ +#define _ITTNOTIFY_PRIVATE_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @ingroup clockdomain + * @brief Begin an overlapped task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null. + * @param[in] parentid The parent of this task, or __itt_null. + * @param[in] name The name of this task. + */ +void ITTAPI __itt_task_begin_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); + +/** + * @ingroup clockdomain + * @brief End an overlapped task instance. 
+ * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] taskid Explicit ID of finished task + */ +void ITTAPI __itt_task_end_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, task_begin_overlapped_ex, (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name)) +ITT_STUBV(ITTAPI, void, task_end_overlapped_ex, (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid)) +#define __itt_task_begin_overlapped_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_overlapped_ex,d,x,y,z,a,b) +#define __itt_task_begin_overlapped_ex_ptr ITTNOTIFY_NAME(task_begin_overlapped_ex) +#define __itt_task_end_overlapped_ex(d,x,y,z) ITTNOTIFY_VOID_D3(task_end_overlapped_ex,d,x,y,z) +#define __itt_task_end_overlapped_ex_ptr ITTNOTIFY_NAME(task_end_overlapped_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_task_begin_overlapped_ex(domain,clock_domain,timestamp,taskid,parentid,name) +#define __itt_task_begin_overlapped_ex_ptr 0 +#define __itt_task_end_overlapped_ex(domain,clock_domain,timestamp,taskid) +#define __itt_task_end_overlapped_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_task_begin_overlapped_ex_ptr 0 +#define __itt_task_end_overlapped_ptr 0 +#define __itt_task_end_overlapped_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @defgroup makrs_internal Marks + * @ingroup internal + * Marks group + * @warning Internal API: + * - It is not shipped to outside of Intel + * - It is delivered to internal Intel teams using e-mail or SVN 
access only + * @{ + */ +/** @brief user mark type */ +typedef int __itt_mark_type; + +/** + * @brief Creates a user mark type with the specified name using char or Unicode string. + * @param[in] name - name of mark to create + * @return Returns a handle to the mark type + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_mark_type ITTAPI __itt_mark_createA(const char *name); +__itt_mark_type ITTAPI __itt_mark_createW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_mark_create __itt_mark_createW +# define __itt_mark_create_ptr __itt_mark_createW_ptr +#else /* UNICODE */ +# define __itt_mark_create __itt_mark_createA +# define __itt_mark_create_ptr __itt_mark_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_mark_type ITTAPI __itt_mark_create(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char *name)) +ITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_mark_type, mark_create, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_createA ITTNOTIFY_DATA(mark_createA) +#define __itt_mark_createA_ptr ITTNOTIFY_NAME(mark_createA) +#define __itt_mark_createW ITTNOTIFY_DATA(mark_createW) +#define __itt_mark_createW_ptr ITTNOTIFY_NAME(mark_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_create ITTNOTIFY_DATA(mark_create) +#define __itt_mark_create_ptr ITTNOTIFY_NAME(mark_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_createA(name) (__itt_mark_type)0 +#define __itt_mark_createA_ptr 0 +#define __itt_mark_createW(name) 
(__itt_mark_type)0 +#define __itt_mark_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_create(name) (__itt_mark_type)0 +#define __itt_mark_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_createA_ptr 0 +#define __itt_mark_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Creates a "discrete" user mark type of the specified type and an optional parameter using char or Unicode string. + * + * - The mark of "discrete" type is placed to collection results in case of success. It appears in overtime view(s) as a special tick sign. + * - The call is "synchronous" - function returns after mark is actually added to results. + * - This function is useful, for example, to mark different phases of application + * (beginning of the next mark automatically meand end of current region). + * - Can be used together with "continuous" marks (see below) at the same collection session + * @param[in] mt - mark, created by __itt_mark_create(const char* name) function + * @param[in] parameter - string parameter of mark + * @return Returns zero value in case of success, non-zero value otherwise. 
+ */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +int ITTAPI __itt_markA(__itt_mark_type mt, const char *parameter); +int ITTAPI __itt_markW(__itt_mark_type mt, const wchar_t *parameter); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_mark __itt_markW +# define __itt_mark_ptr __itt_markW_ptr +#else /* UNICODE */ +# define __itt_mark __itt_markA +# define __itt_mark_ptr __itt_markA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +int ITTAPI __itt_mark(__itt_mark_type mt, const char *parameter); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, int, markA, (__itt_mark_type mt, const char *parameter)) +ITT_STUB(ITTAPI, int, markW, (__itt_mark_type mt, const wchar_t *parameter)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, int, mark, (__itt_mark_type mt, const char *parameter)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_markA ITTNOTIFY_DATA(markA) +#define __itt_markA_ptr ITTNOTIFY_NAME(markA) +#define __itt_markW ITTNOTIFY_DATA(markW) +#define __itt_markW_ptr ITTNOTIFY_NAME(markW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark ITTNOTIFY_DATA(mark) +#define __itt_mark_ptr ITTNOTIFY_NAME(mark) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_markA(mt, parameter) (int)0 +#define __itt_markA_ptr 0 +#define __itt_markW(mt, parameter) (int)0 +#define __itt_markW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark(mt, parameter) (int)0 +#define __itt_mark_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_markA_ptr 0 +#define __itt_markW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define 
__itt_mark_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Use this if necessary to create a "discrete" user event type (mark) for process + * rather then for one thread + * @see int __itt_mark(__itt_mark_type mt, const char* parameter); + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +int ITTAPI __itt_mark_globalA(__itt_mark_type mt, const char *parameter); +int ITTAPI __itt_mark_globalW(__itt_mark_type mt, const wchar_t *parameter); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_mark_global __itt_mark_globalW +# define __itt_mark_global_ptr __itt_mark_globalW_ptr +#else /* UNICODE */ +# define __itt_mark_global __itt_mark_globalA +# define __itt_mark_global_ptr __itt_mark_globalA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +int ITTAPI __itt_mark_global(__itt_mark_type mt, const char *parameter); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, int, mark_globalA, (__itt_mark_type mt, const char *parameter)) +ITT_STUB(ITTAPI, int, mark_globalW, (__itt_mark_type mt, const wchar_t *parameter)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, int, mark_global, (__itt_mark_type mt, const char *parameter)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_globalA ITTNOTIFY_DATA(mark_globalA) +#define __itt_mark_globalA_ptr ITTNOTIFY_NAME(mark_globalA) +#define __itt_mark_globalW ITTNOTIFY_DATA(mark_globalW) +#define __itt_mark_globalW_ptr ITTNOTIFY_NAME(mark_globalW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_global ITTNOTIFY_DATA(mark_global) +#define __itt_mark_global_ptr ITTNOTIFY_NAME(mark_global) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define 
__itt_mark_globalA(mt, parameter) (int)0 +#define __itt_mark_globalA_ptr 0 +#define __itt_mark_globalW(mt, parameter) (int)0 +#define __itt_mark_globalW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_global(mt, parameter) (int)0 +#define __itt_mark_global_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_globalA_ptr 0 +#define __itt_mark_globalW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_global_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Creates an "end" point for "continuous" mark with specified name. + * + * - Returns zero value in case of success, non-zero value otherwise. + * Also returns non-zero value when preceding "begin" point for the + * mark with the same name failed to be created or not created. + * - The mark of "continuous" type is placed to collection results in + * case of success. It appears in overtime view(s) as a special tick + * sign (different from "discrete" mark) together with line from + * corresponding "begin" mark to "end" mark. + * @note Continuous marks can overlap and be nested inside each other. + * Discrete mark can be nested inside marked region + * @param[in] mt - mark, created by __itt_mark_create(const char* name) function + * @return Returns zero value in case of success, non-zero value otherwise. 
+ */ +int ITTAPI __itt_mark_off(__itt_mark_type mt); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, int, mark_off, (__itt_mark_type mt)) +#define __itt_mark_off ITTNOTIFY_DATA(mark_off) +#define __itt_mark_off_ptr ITTNOTIFY_NAME(mark_off) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_mark_off(mt) (int)0 +#define __itt_mark_off_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_mark_off_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Use this if necessary to create an "end" point for mark of process + * @see int __itt_mark_off(__itt_mark_type mt); + */ +int ITTAPI __itt_mark_global_off(__itt_mark_type mt); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt)) +#define __itt_mark_global_off ITTNOTIFY_DATA(mark_global_off) +#define __itt_mark_global_off_ptr ITTNOTIFY_NAME(mark_global_off) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_mark_global_off(mt) (int)0 +#define __itt_mark_global_off_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_mark_global_off_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} marks group */ + +/** + * @defgroup counters_internal Counters + * @ingroup internal + * Counters group + * @{ + */ + + +/** + * @defgroup stitch Stack Stitching + * @ingroup internal + * Stack Stitching group + * @{ + */ +/** + * @brief opaque structure for counter identification + */ +typedef struct ___itt_caller *__itt_caller; + +/** + * @brief Create the stitch point e.g. a point in call stack where other stacks should be stitched to. + * The function returns a unique identifier which is used to match the cut points with corresponding stitch points. 
+ */ +__itt_caller ITTAPI __itt_stack_caller_create(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void)) +#define __itt_stack_caller_create ITTNOTIFY_DATA(stack_caller_create) +#define __itt_stack_caller_create_ptr ITTNOTIFY_NAME(stack_caller_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_stack_caller_create() (__itt_caller)0 +#define __itt_stack_caller_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_stack_caller_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Destroy the information about stitch point identified by the pointer previously returned by __itt_stack_caller_create() + */ +void ITTAPI __itt_stack_caller_destroy(__itt_caller id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id)) +#define __itt_stack_caller_destroy ITTNOTIFY_VOID(stack_caller_destroy) +#define __itt_stack_caller_destroy_ptr ITTNOTIFY_NAME(stack_caller_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_stack_caller_destroy(id) +#define __itt_stack_caller_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_stack_caller_destroy_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Sets the cut point. Stack from each event which occurs after this call will be cut + * at the same stack level the function was called and stitched to the corresponding stitch point. 
+ */ +void ITTAPI __itt_stack_callee_enter(__itt_caller id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, stack_callee_enter, (__itt_caller id)) +#define __itt_stack_callee_enter ITTNOTIFY_VOID(stack_callee_enter) +#define __itt_stack_callee_enter_ptr ITTNOTIFY_NAME(stack_callee_enter) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_stack_callee_enter(id) +#define __itt_stack_callee_enter_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_stack_callee_enter_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief This function eliminates the cut point which was set by latest __itt_stack_callee_enter(). + */ +void ITTAPI __itt_stack_callee_leave(__itt_caller id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, stack_callee_leave, (__itt_caller id)) +#define __itt_stack_callee_leave ITTNOTIFY_VOID(stack_callee_leave) +#define __itt_stack_callee_leave_ptr ITTNOTIFY_NAME(stack_callee_leave) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_stack_callee_leave(id) +#define __itt_stack_callee_leave_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_stack_callee_leave_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} stitch group */ + +/* ***************************************************************************************************************************** */ + +#include <stdarg.h> + +/** @cond exclude_from_documentation */ +typedef enum __itt_error_code +{ + __itt_error_success = 0, /*!< no error */ + __itt_error_no_module = 1, /*!< module can't be loaded */ + /* %1$s -- library name; win: %2$d -- system error code; unx: %2$s -- system error message. */ + __itt_error_no_symbol = 2, /*!< symbol not found */ + /* %1$s -- library name, %2$s -- symbol name. 
*/ + __itt_error_unknown_group = 3, /*!< unknown group specified */ + /* %1$s -- env var name, %2$s -- group name. */ + __itt_error_cant_read_env = 4, /*!< GetEnvironmentVariable() failed */ + /* %1$s -- env var name, %2$d -- system error. */ + __itt_error_env_too_long = 5, /*!< variable value too long */ + /* %1$s -- env var name, %2$d -- actual length of the var, %3$d -- max allowed length. */ + __itt_error_system = 6 /*!< pthread_mutexattr_init or pthread_mutex_init failed */ + /* %1$s -- function name, %2$d -- errno. */ +} __itt_error_code; + +typedef void (__itt_error_handler_t)(__itt_error_code code, va_list); +__itt_error_handler_t* __itt_set_error_handler(__itt_error_handler_t*); + +const char* ITTAPI __itt_api_version(void); +/** @endcond */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#define __itt_error_handler ITT_JOIN(INTEL_ITTNOTIFY_PREFIX, error_handler) +void __itt_error_handler(__itt_error_code code, va_list args); +extern const int ITTNOTIFY_NAME(err); +#define __itt_err ITTNOTIFY_NAME(err) +ITT_STUB(ITTAPI, const char*, api_version, (void)) +#define __itt_api_version ITTNOTIFY_DATA(api_version) +#define __itt_api_version_ptr ITTNOTIFY_NAME(api_version) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_api_version() (const char*)0 +#define __itt_api_version_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_api_version_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _ITTNOTIFY_PRIVATE_ */ + +#endif /* INTEL_ITTNOTIFY_API_PRIVATE */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/file.h b/contrib/restricted/aws/aws-c-common/include/aws/common/file.h index 842b777d8f1..23937e866ad 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/file.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/file.h @@ -191,6 +191,8 @@ bool 
aws_path_exists(const struct aws_string *path); * fseeko() on linux * * whence can either be SEEK_SET or SEEK_END + * + * Returns AWS_OP_SUCCESS, or AWS_OP_ERR (after an error has been raised). */ AWS_COMMON_API int aws_fseek(FILE *file, int64_t offset, int whence); diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h b/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h index 370b1c38db9..c2cfadd7780 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h @@ -12,9 +12,11 @@ AWS_PUSH_SANE_WARNING_LEVEL -#define AWS_COMMON_HASH_TABLE_ITER_CONTINUE (1 << 0) -#define AWS_COMMON_HASH_TABLE_ITER_DELETE (1 << 1) -#define AWS_COMMON_HASH_TABLE_ITER_ERROR (1 << 2) +enum { + AWS_COMMON_HASH_TABLE_ITER_CONTINUE = (1 << 0), + AWS_COMMON_HASH_TABLE_ITER_DELETE = (1 << 1), + AWS_COMMON_HASH_TABLE_ITER_ERROR = (1 << 2), +}; /** * Hash table data structure. This module provides an automatically resizing @@ -101,8 +103,9 @@ typedef uint64_t(aws_hash_fn)(const void *key); * keys, but note that the same type is used for a function that compares * two hash table values in aws_hash_table_eq. * - * Equality functions used in a hash table must be reflexive (i.e., a == b if - * and only if b == a), and must be consistent with the hash function in use. + * Equality functions used in a hash table must be be reflexive (a == a), + * symmetric (a == b => b == a), transitive (a == b, b == c => a == c) + * and consistent (result does not change with time). */ typedef bool(aws_hash_callback_eq_fn)(const void *a, const void *b); @@ -273,6 +276,7 @@ int aws_hash_table_create( * * Returns AWS_OP_SUCCESS if an item was found or created. * Raises AWS_ERROR_OOM if hash table expansion was required and memory + * allocation failed. 
*/ AWS_COMMON_API int aws_hash_table_put(struct aws_hash_table *map, const void *key, void *value, int *was_created); @@ -430,6 +434,20 @@ bool aws_hash_table_is_valid(const struct aws_hash_table *map); AWS_COMMON_API bool aws_hash_iter_is_valid(const struct aws_hash_iter *iter); +/** + * Helper function to hash keys that are uint64_t values. + * + * The function is not a strong hash function in any sense; it merely reflects + * the uint64 value back. Do not use this function as a hash if you need + * the properties of a strong hash function. + */ +AWS_COMMON_API uint64_t aws_hash_uint64_t_by_identity(const void *item); + +/** + * Helper function to compare hash keys that are uint64_t values. + */ +AWS_COMMON_API bool aws_hash_compare_uint64_t_eq(const void *a, const void *b); + AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/host_utils.h b/contrib/restricted/aws/aws-c-common/include/aws/common/host_utils.h new file mode 100644 index 00000000000..0b8285d7a50 --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/host_utils.h @@ -0,0 +1,28 @@ +#ifndef AWS_COMMON_HOST_UTILS_H +#define AWS_COMMON_HOST_UTILS_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include <aws/common/common.h> + +struct aws_byte_cursor; + +AWS_PUSH_SANE_WARNING_LEVEL +AWS_EXTERN_C_BEGIN + +/* + * Determine whether host cursor is IPv4 string. + */ +AWS_COMMON_API bool aws_host_utils_is_ipv4(struct aws_byte_cursor host); + +/* + * Determine whether host cursor is IPv6 string. + * Supports checking for uri encoded strings and scoped literals. 
+ */ +AWS_COMMON_API bool aws_host_utils_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded); + +AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_COMMON_HOST_UTILS_H */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/json.h b/contrib/restricted/aws/aws-c-common/include/aws/common/json.h index b614387c0fa..b8c4e6cfe33 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/json.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/json.h @@ -400,7 +400,7 @@ int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct a * @param value The aws_json_value to format. * @param output The destination for the JSON string * @return AWS_OP_SUCCESS if the JSON string was allocated to output without any errors - * Will return AWS_ERROR_INVALID_ARGUMENT if the value passed is not an aws_json_value or if there + * Will return AWS_OP_ERR if the value passed is not an aws_json_value or if there * aws an error appending the JSON into the byte buffer. */ AWS_COMMON_API diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/linked_list.h b/contrib/restricted/aws/aws-c-common/include/aws/common/linked_list.h index 3f550c6fe5d..cb0ce655c30 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/linked_list.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/linked_list.h @@ -182,6 +182,11 @@ AWS_STATIC_IMPL void aws_linked_list_move_all_front( struct aws_linked_list *AWS_RESTRICT dst, struct aws_linked_list *AWS_RESTRICT src); +/** + * Returns true if the node is currently in a list, false otherwise. 
+ */ +AWS_STATIC_IMPL bool aws_linked_list_node_is_in_list(struct aws_linked_list_node *node); + #ifndef AWS_NO_STATIC_IMPL # include <aws/common/linked_list.inl> #endif /* AWS_NO_STATIC_IMPL */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/linked_list.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/linked_list.inl index 99604834af5..4d7e54dd066 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/linked_list.inl +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/linked_list.inl @@ -431,6 +431,10 @@ AWS_STATIC_IMPL void aws_linked_list_move_all_front( AWS_POSTCONDITION(aws_linked_list_is_valid(dst)); } +AWS_STATIC_IMPL bool aws_linked_list_node_is_in_list(struct aws_linked_list_node *node) { + return aws_linked_list_node_prev_is_valid(node) && aws_linked_list_node_next_is_valid(node); +} + AWS_EXTERN_C_END #endif /* AWS_COMMON_LINKED_LIST_INL */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h b/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h index 87b956811f0..009996475e1 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h @@ -58,7 +58,9 @@ enum aws_log_level { typedef uint32_t aws_log_subject_t; /* Each library gets space for 2^^10 log subject entries */ -#define AWS_LOG_SUBJECT_STRIDE_BITS 10 +enum { + AWS_LOG_SUBJECT_STRIDE_BITS = 10, +}; #define AWS_LOG_SUBJECT_STRIDE (1U << AWS_LOG_SUBJECT_STRIDE_BITS) #define AWS_LOG_SUBJECT_BEGIN_RANGE(x) ((x)*AWS_LOG_SUBJECT_STRIDE) #define AWS_LOG_SUBJECT_END_RANGE(x) (((x) + 1) * AWS_LOG_SUBJECT_STRIDE - 1) diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h b/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h index 648e39022da..da329854476 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h 
@@ -61,7 +61,7 @@ AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1) == 1); AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2) == 2); AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3); -#define AWS_CACHE_LINE 64 +enum { AWS_CACHE_LINE = 64 }; /** * Format macro for strings of a specified length. * Allows non null-terminated strings to be used with the printf family of functions. diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h b/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h index b64e5e32f9a..3a491abc984 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h @@ -8,7 +8,7 @@ #include <aws/common/common.h> #ifdef _WIN32 -/* NOTE: Do not use this macro before including Windows.h */ +/* NOTE: Do not use this macro before including windows.h */ # define AWSMUTEX_TO_WINDOWS(pMutex) (PSRWLOCK) & (pMutex)->mutex_handle #else # include <pthread.h> diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/package.h b/contrib/restricted/aws/aws-c-common/include/aws/common/package.h index 0d7b3c2a5d8..c93fd5109ca 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/package.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/package.h @@ -10,7 +10,7 @@ * Preliminary cap on the number of possible aws-c-libraries participating in shared enum ranges for * errors, log subjects, and other cross-library enums. Expandable as needed */ -#define AWS_PACKAGE_SLOTS 16 +#define AWS_PACKAGE_SLOTS 32 /* * Each aws-c-* and aws-crt-* library has a unique package id starting from zero. 
These are used to macro-calculate diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h b/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h index 4478b95d005..26b35e9911c 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h @@ -163,6 +163,12 @@ AWS_COMMON_API int aws_priority_queue_top(const struct aws_priority_queue *queue, void **item); /** + * Removes all elements from the queue, but does not free internal memory. + */ +AWS_COMMON_API +void aws_priority_queue_clear(struct aws_priority_queue *queue); + +/** * Current number of elements in the queue */ AWS_COMMON_API @@ -175,6 +181,24 @@ size_t aws_priority_queue_size(const struct aws_priority_queue *queue); AWS_COMMON_API size_t aws_priority_queue_capacity(const struct aws_priority_queue *queue); +/** + * Initializes a queue node to a default value that indicates the node is not in the queue. + * + * @param node priority queue node to initialize with a default value + */ +AWS_COMMON_API +void aws_priority_queue_node_init(struct aws_priority_queue_node *node); + +/** + * Checks if a priority queue node is currently in a priority queue. 
+ * + * @param node priority queue node to check usage for + * + * @return true if the node is in a queue, false otherwise + */ +AWS_COMMON_API +bool aws_priority_queue_node_is_in_queue(const struct aws_priority_queue_node *node); + AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/system_info_priv.h b/contrib/restricted/aws/aws-c-common/include/aws/common/private/system_info_priv.h new file mode 100644 index 00000000000..27b1d4ad1b8 --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/system_info_priv.h @@ -0,0 +1,37 @@ +#ifndef AWS_COMMON_PRIVATE_SYSTEM_INFO_PRIV_H +#define AWS_COMMON_PRIVATE_SYSTEM_INFO_PRIV_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include <aws/common/byte_buf.h> +#include <aws/common/ref_count.h> +#include <aws/common/string.h> +#include <aws/common/system_info.h> + +struct aws_system_environment { + struct aws_allocator *allocator; + struct aws_ref_count ref_count; + struct aws_byte_buf virtualization_vendor; + struct aws_byte_buf product_name; + enum aws_platform_os os; + size_t cpu_count; + size_t cpu_group_count; + void *impl; +}; + +/** + * For internal implementors. Fill in info in env that you're able to grab, such as dmi info, os version strings etc... + * in here. The default just returns AWS_OP_SUCCESS. This is currently only implemented for linux. + * + * Returns AWS_OP_ERR if the implementation wasn't able to fill in required information for the platform. + */ +int aws_system_environment_load_platform_impl(struct aws_system_environment *env); + +/** + * For internal implementors. Cleans up anything allocated in aws_system_environment_load_platform_impl, + * but does not release the memory for env. 
+ */ +void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env); + +#endif // AWS_COMMON_PRIVATE_SYSTEM_INFO_PRIV_H diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/xml_parser_impl.h b/contrib/restricted/aws/aws-c-common/include/aws/common/private/xml_parser_impl.h index 8f022a4e0bb..eea061b1e69 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/private/xml_parser_impl.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/xml_parser_impl.h @@ -9,6 +9,7 @@ #include <aws/common/xml_parser.h> struct aws_xml_node { + struct aws_xml_parser *parser; struct aws_byte_cursor name; struct aws_array_list attributes; struct aws_byte_cursor doc_at_body; @@ -25,7 +26,6 @@ struct aws_xml_parser { struct aws_byte_cursor split_scratch[11]; size_t max_depth; int error; - bool stop_parsing; }; #endif /* AWS_COMMON_PRIVATE_XML_PARSER_IMPL_H */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h b/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h index f8d86ae2a25..538841f6382 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h @@ -8,7 +8,7 @@ #include <aws/common/common.h> #ifdef _WIN32 -/* NOTE: Do not use this macro before including Windows.h */ +/* NOTE: Do not use this macro before including windows.h */ # define AWSSRW_TO_WINDOWS(pCV) (PSRWLOCK) pCV #else # include <pthread.h> diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/statistics.h b/contrib/restricted/aws/aws-c-common/include/aws/common/statistics.h index 3ad06a429b4..f9d432d3346 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/statistics.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/statistics.h @@ -18,7 +18,10 @@ struct aws_array_list; typedef uint32_t aws_crt_statistics_category_t; /* Each library gets space for 2^^8 category entries */ 
-#define AWS_CRT_STATISTICS_CATEGORY_STRIDE_BITS 8 +enum { + AWS_CRT_STATISTICS_CATEGORY_STRIDE_BITS = 8, +}; + #define AWS_CRT_STATISTICS_CATEGORY_STRIDE (1U << AWS_CRT_STATISTICS_CATEGORY_STRIDE_BITS) #define AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(x) ((x)*AWS_CRT_STATISTICS_CATEGORY_STRIDE) #define AWS_CRT_STATISTICS_CATEGORY_END_RANGE(x) (((x) + 1) * AWS_CRT_STATISTICS_CATEGORY_STRIDE - 1) diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h b/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h index fe7604120d2..91da41f9d8f 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h @@ -6,6 +6,7 @@ * SPDX-License-Identifier: Apache-2.0. */ +#include <aws/common/byte_buf.h> #include <aws/common/common.h> AWS_PUSH_SANE_WARNING_LEVEL @@ -21,8 +22,54 @@ struct aws_cpu_info { bool suspected_hyper_thread; }; +struct aws_system_environment; + AWS_EXTERN_C_BEGIN +/** + * Allocates and initializes information about the system the current process is executing on. + * If successful returns an instance of aws_system_environment. If it fails, it will return NULL. + * + * Note: This api is used internally and is still early in its evolution. + * It may change in incompatible ways in the future. + */ +AWS_COMMON_API +struct aws_system_environment *aws_system_environment_load(struct aws_allocator *allocator); + +AWS_COMMON_API +struct aws_system_environment *aws_system_environment_acquire(struct aws_system_environment *env); + +AWS_COMMON_API +void aws_system_environment_release(struct aws_system_environment *env); + +/** + * Returns the virtualization vendor for the specified compute environment, e.g. "Xen, Amazon EC2, etc..." + * + * The return value may be empty and in that case no vendor was detected. 
+ */ +AWS_COMMON_API +struct aws_byte_cursor aws_system_environment_get_virtualization_vendor(const struct aws_system_environment *env); + +/** + * Returns the product name for the specified compute environment. For example, the Amazon EC2 Instance type. + * + * The return value may be empty and in that case no vendor was detected. + */ +AWS_COMMON_API +struct aws_byte_cursor aws_system_environment_get_virtualization_product_name(const struct aws_system_environment *env); + +/** + * Returns the number of processors for the specified compute environment. + */ +AWS_COMMON_API +size_t aws_system_environment_get_processor_count(struct aws_system_environment *env); + +/** + * Returns the number of separate cpu groupings (multi-socket configurations or NUMA). + */ +AWS_COMMON_API +size_t aws_system_environment_get_cpu_group_count(const struct aws_system_environment *env); + /* Returns the OS this was built under */ AWS_COMMON_API enum aws_platform_os aws_get_platform_build_os(void); diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/system_resource_util.h b/contrib/restricted/aws/aws-c-common/include/aws/common/system_resource_util.h new file mode 100644 index 00000000000..ffe8d53e4fa --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/system_resource_util.h @@ -0,0 +1,30 @@ +#ifndef AWS_COMMON_SYSTEM_RESOURCE_UTIL_H +#define AWS_COMMON_SYSTEM_RESOURCE_UTIL_H + +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include <aws/common/common.h> + +AWS_PUSH_SANE_WARNING_LEVEL + +AWS_EXTERN_C_BEGIN + +struct aws_memory_usage_stats { + size_t maxrss; /* max resident set size in kilobytes since program start */ + size_t page_faults; /* num of page faults since program start */ + + size_t _reserved[8]; +}; + +/* + * Get memory usage for current process. + * Raises AWS_ERROR_SYS_CALL_FAILURE on failure. 
+ */ +AWS_COMMON_API int aws_init_memory_usage_for_current_process(struct aws_memory_usage_stats *memory_usage); + +AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_COMMON_SYSTEM_RESOURCE_UTIL_H */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/uri.h b/contrib/restricted/aws/aws-c-common/include/aws/common/uri.h index 0ac1fd9b3e0..137ba7df800 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/uri.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/uri.h @@ -23,7 +23,7 @@ struct aws_uri { struct aws_byte_cursor user; struct aws_byte_cursor password; struct aws_byte_cursor host_name; - uint16_t port; + uint32_t port; struct aws_byte_cursor path; struct aws_byte_cursor query_string; struct aws_byte_cursor path_and_query; @@ -49,7 +49,7 @@ struct aws_uri_builder_options { struct aws_byte_cursor scheme; struct aws_byte_cursor path; struct aws_byte_cursor host_name; - uint16_t port; + uint32_t port; struct aws_array_list *query_params; struct aws_byte_cursor query_string; }; @@ -107,7 +107,7 @@ AWS_COMMON_API const struct aws_byte_cursor *aws_uri_host_name(const struct aws_ * Returns the port portion of the authority if it was present, otherwise, returns 0. * If this is 0, it is the users job to determine the correct port based on scheme and protocol. */ -AWS_COMMON_API uint16_t aws_uri_port(const struct aws_uri *uri); +AWS_COMMON_API uint32_t aws_uri_port(const struct aws_uri *uri); /** * Returns the path and query portion of the uri (i.e., the thing you send across the wire). @@ -115,6 +115,28 @@ AWS_COMMON_API uint16_t aws_uri_port(const struct aws_uri *uri); AWS_COMMON_API const struct aws_byte_cursor *aws_uri_path_and_query(const struct aws_uri *uri); /** + * For iterating over the params in the query string. + * `param` is an in/out argument used to track progress, it MUST be zeroed out to start. + * If true is returned, `param` contains the value of the next param. 
+ * If false is returned, there are no further params. + * + * Edge cases: + * 1) Entries without '=' sign are treated as having a key and no value. + * Example: First param in query string "a&b=c" has key="a" value="" + * + * 2) Blank entries are skipped. + * Example: The only param in query string "&&a=b" is key="a" value="b" + */ +AWS_COMMON_API bool aws_query_string_next_param(struct aws_byte_cursor query_string, struct aws_uri_param *param); + +/** + * Parses query string and stores the parameters in 'out_params'. Returns AWS_OP_SUCCESS on success and + * AWS_OP_ERR on failure. The user is responsible for initializing out_params with item size of struct aws_query_param. + * The user is also responsible for cleaning up out_params when finished. + */ +AWS_COMMON_API int aws_query_string_params(struct aws_byte_cursor query_string, struct aws_array_list *out_params); + +/** * For iterating over the params in the uri query string. * `param` is an in/out argument used to track progress, it MUST be zeroed out to start. * If true is returned, `param` contains the value of the next param. diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/uuid.h b/contrib/restricted/aws/aws-c-common/include/aws/common/uuid.h index 4bd92c832ce..e3d9b1adeba 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/uuid.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/uuid.h @@ -17,7 +17,7 @@ struct aws_uuid { }; /* 36 bytes for the UUID plus one more for the null terminator. 
*/ -#define AWS_UUID_STR_LEN 37 +enum { AWS_UUID_STR_LEN = 37 }; AWS_EXTERN_C_BEGIN diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/xml_parser.h b/contrib/restricted/aws/aws-c-common/include/aws/common/xml_parser.h index 542a72bec9c..c7ef282273b 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/xml_parser.h +++ b/contrib/restricted/aws/aws-c-common/include/aws/common/xml_parser.h @@ -13,7 +13,6 @@ AWS_PUSH_SANE_WARNING_LEVEL -struct aws_xml_parser; struct aws_xml_node; struct aws_xml_attribute { @@ -24,14 +23,15 @@ struct aws_xml_attribute { /** * Callback for when an xml node is encountered in the document. As a user you have a few options: * - * 1. reject the document parsing at this point by returning false. This will immediately stop doc parsing. + * 1. fail the parse by returning AWS_OP_ERR (after an error has been raised). This will stop any further parsing. * 2. call aws_xml_node_traverse() on the node to descend into the node with a new callback and user_data. * 3. call aws_xml_node_as_body() to retrieve the contents of the node as text. * + * You MUST NOT call both aws_xml_node_traverse() and aws_xml_node_as_body() on the same node. + * * return true to continue the parsing operation. */ -typedef bool( - aws_xml_parser_on_node_encountered_fn)(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data); +typedef int(aws_xml_parser_on_node_encountered_fn)(struct aws_xml_node *node, void *user_data); struct aws_xml_parser_options { /* xml document to parse. */ @@ -39,47 +39,35 @@ struct aws_xml_parser_options { /* Max node depth used for parsing document. */ size_t max_depth; -}; -AWS_EXTERN_C_BEGIN + /* Callback invoked on the root node */ + aws_xml_parser_on_node_encountered_fn *on_root_encountered; -/** - * Allocates an xml parser. 
- */ -AWS_COMMON_API -struct aws_xml_parser *aws_xml_parser_new( - struct aws_allocator *allocator, - const struct aws_xml_parser_options *options); + /* User data for callback */ + void *user_data; +}; -/* - * De-allocates an xml parser. - */ -AWS_COMMON_API -void aws_xml_parser_destroy(struct aws_xml_parser *parser); +AWS_EXTERN_C_BEGIN /** - * Parse the doc until the end or until a callback rejects the document. - * on_node_encountered will be invoked when the root node is encountered. + * Parse an XML document. + * WARNING: This is not a public API. It is only intended for use within the aws-c libraries. */ AWS_COMMON_API -int aws_xml_parser_parse( - struct aws_xml_parser *parser, - aws_xml_parser_on_node_encountered_fn *on_node_encountered, - void *user_data); +int aws_xml_parse(struct aws_allocator *allocator, const struct aws_xml_parser_options *options); /** * Writes the contents of the body of node into out_body. out_body is an output parameter in this case. Upon success, * out_body will contain the body of the node. */ AWS_COMMON_API -int aws_xml_node_as_body(struct aws_xml_parser *parser, struct aws_xml_node *node, struct aws_byte_cursor *out_body); +int aws_xml_node_as_body(struct aws_xml_node *node, struct aws_byte_cursor *out_body); /** * Traverse node and invoke on_node_encountered when a nested node is encountered. */ AWS_COMMON_API int aws_xml_node_traverse( - struct aws_xml_parser *parser, struct aws_xml_node *node, aws_xml_parser_on_node_encountered_fn *on_node_encountered, void *user_data); @@ -88,7 +76,7 @@ int aws_xml_node_traverse( * Get the name of an xml node. */ AWS_COMMON_API -int aws_xml_node_get_name(const struct aws_xml_node *node, struct aws_byte_cursor *out_name); +struct aws_byte_cursor aws_xml_node_get_name(const struct aws_xml_node *node); /* * Get the number of attributes for an xml node. @@ -100,10 +88,7 @@ size_t aws_xml_node_get_num_attributes(const struct aws_xml_node *node); * Get an attribute for an xml node by its index. 
*/ AWS_COMMON_API -int aws_xml_node_get_attribute( - const struct aws_xml_node *node, - size_t attribute_index, - struct aws_xml_attribute *out_attribute); +struct aws_xml_attribute aws_xml_node_get_attribute(const struct aws_xml_node *node, size_t attribute_index); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL diff --git a/contrib/restricted/aws/aws-c-common/source/allocator.c b/contrib/restricted/aws/aws-c-common/source/allocator.c index 67e8695996a..d3d1e98bd72 100644 --- a/contrib/restricted/aws/aws-c-common/source/allocator.c +++ b/contrib/restricted/aws/aws-c-common/source/allocator.c @@ -12,7 +12,7 @@ #include <stdlib.h> #ifdef _WIN32 -# include <Windows.h> +# include <windows.h> #endif #ifdef __MACH__ @@ -34,7 +34,7 @@ bool aws_allocator_is_valid(const struct aws_allocator *alloc) { return alloc && AWS_OBJECT_PTR_IS_READABLE(alloc) && alloc->mem_acquire && alloc->mem_release; } -static void *s_default_malloc(struct aws_allocator *allocator, size_t size) { +static void *s_aligned_malloc(struct aws_allocator *allocator, size_t size) { (void)allocator; /* larger allocations should be aligned so that AVX and friends can avoid * the extra preamble during unaligned versions of memcpy/memset on big buffers @@ -48,7 +48,7 @@ static void *s_default_malloc(struct aws_allocator *allocator, size_t size) { * We use PAGE_SIZE as the boundary because we are not aware of any allocations of * this size or greater that are not data buffers */ - const size_t alignment = sizeof(void *) * (size > PAGE_SIZE ? 8 : 2); + const size_t alignment = sizeof(void *) * (size > (size_t)PAGE_SIZE ? 
8 : 2); #if !defined(_WIN32) void *result = NULL; int err = posix_memalign(&result, alignment, size); @@ -62,7 +62,7 @@ static void *s_default_malloc(struct aws_allocator *allocator, size_t size) { #endif } -static void s_default_free(struct aws_allocator *allocator, void *ptr) { +static void s_aligned_free(struct aws_allocator *allocator, void *ptr) { (void)allocator; #if !defined(_WIN32) free(ptr); @@ -71,7 +71,7 @@ static void s_default_free(struct aws_allocator *allocator, void *ptr) { #endif } -static void *s_default_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) { +static void *s_aligned_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) { (void)allocator; (void)oldsize; AWS_FATAL_PRECONDITION(newsize); @@ -82,41 +82,92 @@ static void *s_default_realloc(struct aws_allocator *allocator, void *ptr, size_ } /* newsize is > oldsize, need more memory */ - void *new_mem = s_default_malloc(allocator, newsize); - AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in s_default_malloc"); + void *new_mem = s_aligned_malloc(allocator, newsize); + AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in s_aligned_malloc"); if (ptr) { memcpy(new_mem, ptr, oldsize); - s_default_free(allocator, ptr); + s_aligned_free(allocator, ptr); } return new_mem; #else - const size_t alignment = sizeof(void *) * (newsize > PAGE_SIZE ? 8 : 2); + const size_t alignment = sizeof(void *) * (newsize > (size_t)PAGE_SIZE ? 
8 : 2); void *new_mem = _aligned_realloc(ptr, newsize, alignment); AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in _aligned_realloc"); return new_mem; #endif } -static void *s_default_calloc(struct aws_allocator *allocator, size_t num, size_t size) { - void *mem = s_default_malloc(allocator, num * size); - AWS_PANIC_OOM(mem, "Unhandled OOM encountered in s_default_malloc"); +static void *s_aligned_calloc(struct aws_allocator *allocator, size_t num, size_t size) { + void *mem = s_aligned_malloc(allocator, num * size); + AWS_PANIC_OOM(mem, "Unhandled OOM encountered in s_aligned_calloc"); memset(mem, 0, num * size); return mem; } +static void *s_non_aligned_malloc(struct aws_allocator *allocator, size_t size) { + (void)allocator; + void *result = malloc(size); + AWS_PANIC_OOM(result, "malloc failed to allocate memory"); + return result; +} + +static void s_non_aligned_free(struct aws_allocator *allocator, void *ptr) { + (void)allocator; + free(ptr); +} + +static void *s_non_aligned_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) { + (void)allocator; + (void)oldsize; + AWS_FATAL_PRECONDITION(newsize); + + if (newsize <= oldsize) { + return ptr; + } + + /* newsize is > oldsize, need more memory */ + void *new_mem = s_non_aligned_malloc(allocator, newsize); + AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in s_non_aligned_realloc"); + + if (ptr) { + memcpy(new_mem, ptr, oldsize); + s_non_aligned_free(allocator, ptr); + } + + return new_mem; +} + +static void *s_non_aligned_calloc(struct aws_allocator *allocator, size_t num, size_t size) { + (void)allocator; + void *mem = calloc(num, size); + AWS_PANIC_OOM(mem, "Unhandled OOM encountered in s_non_aligned_calloc"); + return mem; +} + static struct aws_allocator default_allocator = { - .mem_acquire = s_default_malloc, - .mem_release = s_default_free, - .mem_realloc = s_default_realloc, - .mem_calloc = s_default_calloc, + .mem_acquire = s_non_aligned_malloc, + .mem_release = 
s_non_aligned_free, + .mem_realloc = s_non_aligned_realloc, + .mem_calloc = s_non_aligned_calloc, }; struct aws_allocator *aws_default_allocator(void) { return &default_allocator; } +static struct aws_allocator aligned_allocator = { + .mem_acquire = s_aligned_malloc, + .mem_release = s_aligned_free, + .mem_realloc = s_aligned_realloc, + .mem_calloc = s_aligned_calloc, +}; + +struct aws_allocator *aws_aligned_allocator(void) { + return &aligned_allocator; +} + void *aws_mem_acquire(struct aws_allocator *allocator, size_t size) { AWS_FATAL_PRECONDITION(allocator != NULL); AWS_FATAL_PRECONDITION(allocator->mem_acquire != NULL); @@ -289,7 +340,7 @@ static void *s_cf_allocator_reallocate(void *ptr, CFIndex new_size, CFOptionFlag memcpy(&original_size, original_allocation, sizeof(size_t)); aws_mem_realloc(allocator, &original_allocation, original_size, (size_t)new_size); - + AWS_FATAL_ASSERT(original_allocation); size_t new_allocation_size = (size_t)new_size; memcpy(original_allocation, &new_allocation_size, sizeof(size_t)); @@ -306,7 +357,7 @@ static CFIndex s_cf_allocator_preferred_size(CFIndex size, CFOptionFlags hint, v (void)hint; (void)info; - return size + sizeof(size_t); + return (CFIndex)(size + sizeof(size_t)); } CFAllocatorRef aws_wrapped_cf_allocator_new(struct aws_allocator *allocator) { diff --git a/contrib/restricted/aws/aws-c-common/source/allocator_sba.c b/contrib/restricted/aws/aws-c-common/source/allocator_sba.c index 91a478f4f5e..ab1e833f045 100644 --- a/contrib/restricted/aws/aws-c-common/source/allocator_sba.c +++ b/contrib/restricted/aws/aws-c-common/source/allocator_sba.c @@ -44,7 +44,7 @@ #define AWS_SBA_TAG_VALUE 0x736f6d6570736575ULL /* list of sizes of bins, must be powers of 2, and less than AWS_SBA_PAGE_SIZE * 0.5 */ -#define AWS_SBA_BIN_COUNT 5 +enum { AWS_SBA_BIN_COUNT = 5 }; static const size_t s_bin_sizes[AWS_SBA_BIN_COUNT] = {32, 64, 128, 256, 512}; static const size_t s_max_bin_size = 512; @@ -348,7 +348,7 @@ static void 
s_sba_free_to_bin(struct sba_bin *bin, void *addr) { uint8_t *page_start = (uint8_t *)page + sizeof(struct page_header); uint8_t *page_end = page_start + AWS_SBA_PAGE_SIZE; /* Remove all chunks in the page from the free list */ - intptr_t chunk_idx = bin->free_chunks.length; + intptr_t chunk_idx = (intptr_t)bin->free_chunks.length; for (; chunk_idx >= 0; --chunk_idx) { uint8_t *chunk = NULL; aws_array_list_get_at(&bin->free_chunks, &chunk, chunk_idx); diff --git a/contrib/restricted/aws/aws-c-common/source/arch/arm/asm/cpuid.c b/contrib/restricted/aws/aws-c-common/source/arch/arm/auxv/cpuid.c index 6a306df9809..10499da73ea 100644 --- a/contrib/restricted/aws/aws-c-common/source/arch/arm/asm/cpuid.c +++ b/contrib/restricted/aws/aws-c-common/source/arch/arm/auxv/cpuid.c @@ -29,7 +29,9 @@ struct cap_bits { # if (defined(__aarch64__)) struct cap_bits s_check_cap[AWS_CPU_FEATURE_COUNT] = { - [AWS_CPU_FEATURE_ARM_CRC] = {0, 1 << 7 /* HWCAP_CRC */}, + [AWS_CPU_FEATURE_ARM_CRC] = {0, 1 << 7 /* HWCAP_CRC32 */}, + [AWS_CPU_FEATURE_ARM_PMULL] = {0, 1 << 4 /* HWCAP_PMULL */}, + [AWS_CPU_FEATURE_ARM_CRYPTO] = {0, 1 << 3 /* HWCAP_AES */}, }; # else struct cap_bits s_check_cap[AWS_CPU_FEATURE_COUNT] = { @@ -67,6 +69,10 @@ bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) { switch (feature_name) { case AWS_CPU_FEATURE_ARM_CRC: +# if (defined(__aarch64__)) + case AWS_CPU_FEATURE_ARM_PMULL: + case AWS_CPU_FEATURE_ARM_CRYPTO: +# endif // (defined(__aarch64__)) return s_hwcap[s_check_cap[feature_name].cap] & s_check_cap[feature_name].bit; default: return false; diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c index 98c51b88d18..465fccd17a5 100644 --- a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c +++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c @@ -85,6 +85,20 @@ static bool s_has_avx2(void) { return true; } +static bool s_has_avx512(void) { + 
uint32_t abcd[4]; + + /* Check AVX512F: + * CPUID.(EAX=07H, ECX=0H):EBX.AVX512[bit 16]==1 */ + uint32_t avx512_mask = (1 << 16); + aws_run_cpuid(7, 0, abcd); + if ((abcd[1] & avx512_mask) != avx512_mask) { + return false; + } + + return true; +} + static bool s_has_bmi2(void) { uint32_t abcd[4]; @@ -99,12 +113,26 @@ static bool s_has_bmi2(void) { return true; } +static bool s_has_vpclmulqdq(void) { + uint32_t abcd[4]; + /* Check VPCLMULQDQ: + * CPUID.(EAX=07H, ECX=0H):ECX.VPCLMULQDQ[bit 10]==1 */ + uint32_t vpclmulqdq_mask = (1 << 10); + aws_run_cpuid(7, 0, abcd); + if ((abcd[2] & vpclmulqdq_mask) != vpclmulqdq_mask) { + return false; + } + return true; +} + has_feature_fn *s_check_cpu_feature[AWS_CPU_FEATURE_COUNT] = { [AWS_CPU_FEATURE_CLMUL] = s_has_clmul, [AWS_CPU_FEATURE_SSE_4_1] = s_has_sse41, [AWS_CPU_FEATURE_SSE_4_2] = s_has_sse42, [AWS_CPU_FEATURE_AVX2] = s_has_avx2, + [AWS_CPU_FEATURE_AVX512] = s_has_avx512, [AWS_CPU_FEATURE_BMI2] = s_has_bmi2, + [AWS_CPU_FEATURE_VPCLMULQDQ] = s_has_vpclmulqdq, }; bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) { diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c index ebae8613810..439d6ddada9 100644 --- a/contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c +++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c @@ -179,7 +179,7 @@ static inline bool decode(const unsigned char *in, unsigned char *out) { * so we'll just copy right out of the vector as a fallback */ -#ifdef HAVE_MM256_EXTRACT_EPI64 +#ifdef AWS_HAVE_MM256_EXTRACT_EPI64 uint64_t hi = _mm256_extract_epi64(vec, 2); const uint64_t *p_hi = &hi; #else diff --git a/contrib/restricted/aws/aws-c-common/source/common.c b/contrib/restricted/aws/aws-c-common/source/common.c index 062d23228d5..2c971b5b475 100644 --- a/contrib/restricted/aws/aws-c-common/source/common.c +++ 
b/contrib/restricted/aws/aws-c-common/source/common.c @@ -14,7 +14,7 @@ #include <stdlib.h> #ifdef _WIN32 -# include <Windows.h> +# include <windows.h> #else # include <dlfcn.h> #endif @@ -220,7 +220,7 @@ static struct aws_error_info errors[] = { ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_SYS_CALL_FAILURE, - "System call failure"), + "System call failure."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_FILE_INVALID_PATH, "Invalid file path."), @@ -232,7 +232,7 @@ static struct aws_error_info errors[] = { "User does not have permission to perform the requested action."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_STREAM_UNSEEKABLE, - "Stream does not support seek operations"), + "Stream does not support seek operations."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED, "A c-string like buffer was passed but a null terminator was not found within the bounds of the buffer."), @@ -244,7 +244,7 @@ static struct aws_error_info errors[] = { "Attempt to divide a number by zero."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_FILE_HANDLE, - "Invalid file handle"), + "Invalid file handle."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_OPERATION_INTERUPTED, "The operation was interrupted." 
@@ -255,13 +255,25 @@ static struct aws_error_info errors[] = { ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_PLATFORM_NOT_SUPPORTED, - "Feature not supported on this platform"), + "Feature not supported on this platform."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_UTF8, - "Invalid UTF-8"), + "Invalid UTF-8."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_GET_HOME_DIRECTORY_FAILED, - "Failed to get home directory"), + "Failed to get home directory."), + AWS_DEFINE_ERROR_INFO_COMMON( + AWS_ERROR_INVALID_XML, + "Invalid XML document."), + AWS_DEFINE_ERROR_INFO_COMMON( + AWS_ERROR_FILE_OPEN_FAILURE, + "Failed opening file."), + AWS_DEFINE_ERROR_INFO_COMMON( + AWS_ERROR_FILE_READ_FAILURE, + "Failed reading from file."), + AWS_DEFINE_ERROR_INFO_COMMON( + AWS_ERROR_FILE_WRITE_FAILURE, + "Failed writing to file."), }; /* clang-format on */ @@ -311,15 +323,15 @@ void aws_common_library_init(struct aws_allocator *allocator) { assumptions due to the way loaders and dlload are often implemented and those symbols are defined by things like libpthread.so on some unix distros. Sorry about the memory usage here, but it's our only safe choice. Also, please don't do numa configurations if memory is your economic bottleneck. 
*/ - g_libnuma_handle = dlopen("libnuma.so", RTLD_LOCAL); + g_libnuma_handle = dlopen("libnuma.so", RTLD_LAZY | RTLD_LOCAL); /* turns out so versioning is really inconsistent these days */ if (!g_libnuma_handle) { - g_libnuma_handle = dlopen("libnuma.so.1", RTLD_LOCAL); + g_libnuma_handle = dlopen("libnuma.so.1", RTLD_LAZY | RTLD_LOCAL); } if (!g_libnuma_handle) { - g_libnuma_handle = dlopen("libnuma.so.2", RTLD_LOCAL); + g_libnuma_handle = dlopen("libnuma.so.2", RTLD_LAZY | RTLD_LOCAL); } if (g_libnuma_handle) { diff --git a/contrib/restricted/aws/aws-c-common/source/date_time.c b/contrib/restricted/aws/aws-c-common/source/date_time.c index cee4a90d88b..0c1869bcefb 100644 --- a/contrib/restricted/aws/aws-c-common/source/date_time.c +++ b/contrib/restricted/aws/aws-c-common/source/date_time.c @@ -131,7 +131,7 @@ static bool is_utc_time_zone(const char *str) { size_t len = strlen(str); if (len > 0) { - if (str[0] == 'Z') { + if (tolower((uint8_t)str[0]) == 'z') { return true; } @@ -207,232 +207,7 @@ enum parser_state { FINISHED, }; -static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor, struct tm *parsed_time) { - size_t index = 0; - size_t state_start_index = 0; - enum parser_state state = ON_YEAR; - bool error = false; - - AWS_ZERO_STRUCT(*parsed_time); - - while (state < FINISHED && !error && index < date_str_cursor->len) { - char c = date_str_cursor->ptr[index]; - size_t sub_index = index - state_start_index; - switch (state) { - case ON_YEAR: - if (aws_isdigit(c)) { - parsed_time->tm_year = parsed_time->tm_year * 10 + (c - '0'); - if (sub_index == 3) { - state = ON_MONTH; - state_start_index = index + 1; - parsed_time->tm_year -= 1900; - } - } else { - error = true; - } - break; - - case ON_MONTH: - if (aws_isdigit(c)) { - parsed_time->tm_mon = parsed_time->tm_mon * 10 + (c - '0'); - if (sub_index == 1) { - state = ON_MONTH_DAY; - state_start_index = index + 1; - parsed_time->tm_mon -= 1; - } - } else { - error = true; - } - break; - - 
case ON_MONTH_DAY: - if (c == 'T' && sub_index == 2) { - state = ON_HOUR; - state_start_index = index + 1; - } else if (aws_isdigit(c)) { - parsed_time->tm_mday = parsed_time->tm_mday * 10 + (c - '0'); - } else { - error = true; - } - break; - - case ON_HOUR: - if (aws_isdigit(c)) { - parsed_time->tm_hour = parsed_time->tm_hour * 10 + (c - '0'); - if (sub_index == 1) { - state = ON_MINUTE; - state_start_index = index + 1; - } - } else { - error = true; - } - break; - - case ON_MINUTE: - if (aws_isdigit(c)) { - parsed_time->tm_min = parsed_time->tm_min * 10 + (c - '0'); - if (sub_index == 1) { - state = ON_SECOND; - state_start_index = index + 1; - } - } else { - error = true; - } - break; - - case ON_SECOND: - if (aws_isdigit(c)) { - parsed_time->tm_sec = parsed_time->tm_sec * 10 + (c - '0'); - if (sub_index == 1) { - state = ON_TZ; - state_start_index = index + 1; - } - } else { - error = true; - } - break; - - case ON_TZ: - if (c == 'Z' && (sub_index == 0 || sub_index == 3)) { - state = FINISHED; - } else if (!aws_isdigit(c) || sub_index > 3) { - error = true; - } - break; - - default: - error = true; - break; - } - - index++; - } - - /* ISO8601 supports date only with no time portion. state ==ON_MONTH_DAY catches this case. */ - return (state == FINISHED || state == ON_MONTH_DAY) && !error ? 
AWS_OP_SUCCESS : AWS_OP_ERR; -} - -static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struct tm *parsed_time) { - size_t index = 0; - size_t state_start_index = 0; - enum parser_state state = ON_YEAR; - bool error = false; - bool advance = true; - - AWS_ZERO_STRUCT(*parsed_time); - - while (state < FINISHED && !error && index < date_str_cursor->len) { - char c = date_str_cursor->ptr[index]; - switch (state) { - case ON_YEAR: - if (c == '-' && index - state_start_index == 4) { - state = ON_MONTH; - state_start_index = index + 1; - parsed_time->tm_year -= 1900; - } else if (aws_isdigit(c)) { - parsed_time->tm_year = parsed_time->tm_year * 10 + (c - '0'); - } else { - error = true; - } - break; - case ON_MONTH: - if (c == '-' && index - state_start_index == 2) { - state = ON_MONTH_DAY; - state_start_index = index + 1; - parsed_time->tm_mon -= 1; - } else if (aws_isdigit(c)) { - parsed_time->tm_mon = parsed_time->tm_mon * 10 + (c - '0'); - } else { - error = true; - } - - break; - case ON_MONTH_DAY: - if (c == 'T' && index - state_start_index == 2) { - state = ON_HOUR; - state_start_index = index + 1; - } else if (aws_isdigit(c)) { - parsed_time->tm_mday = parsed_time->tm_mday * 10 + (c - '0'); - } else { - error = true; - } - break; - /* note: no time portion is spec compliant. */ - case ON_HOUR: - /* time parts can be delimited by ':' or just concatenated together, but must always be 2 digits. */ - if (index - state_start_index == 2) { - state = ON_MINUTE; - state_start_index = index + 1; - if (aws_isdigit(c)) { - state_start_index = index; - advance = false; - } else if (c != ':') { - error = true; - } - } else if (aws_isdigit(c)) { - parsed_time->tm_hour = parsed_time->tm_hour * 10 + (c - '0'); - } else { - error = true; - } - - break; - case ON_MINUTE: - /* time parts can be delimited by ':' or just concatenated together, but must always be 2 digits. 
*/ - if (index - state_start_index == 2) { - state = ON_SECOND; - state_start_index = index + 1; - if (aws_isdigit(c)) { - state_start_index = index; - advance = false; - } else if (c != ':') { - error = true; - } - } else if (aws_isdigit(c)) { - parsed_time->tm_min = parsed_time->tm_min * 10 + (c - '0'); - } else { - error = true; - } - - break; - case ON_SECOND: - if (c == 'Z' && index - state_start_index == 2) { - state = FINISHED; - state_start_index = index + 1; - } else if (c == '.' && index - state_start_index == 2) { - state = ON_TZ; - state_start_index = index + 1; - } else if (aws_isdigit(c)) { - parsed_time->tm_sec = parsed_time->tm_sec * 10 + (c - '0'); - } else { - error = true; - } - - break; - case ON_TZ: - if (c == 'Z') { - state = FINISHED; - state_start_index = index + 1; - } else if (!aws_isdigit(c)) { - error = true; - } - break; - default: - error = true; - break; - } - - if (advance) { - index++; - } else { - advance = true; - } - } - - /* ISO8601 supports date only with no time portion. state ==ON_MONTH_DAY catches this case. */ - return (state == FINISHED || state == ON_MONTH_DAY) && !error ? AWS_OP_SUCCESS : AWS_OP_ERR; -} - -static int s_parse_rfc_822( +static bool s_parse_rfc_822( const struct aws_byte_cursor *date_str_cursor, struct tm *parsed_time, struct aws_date_time *dt) { @@ -446,7 +221,7 @@ static int s_parse_rfc_822( AWS_ZERO_STRUCT(*parsed_time); while (!error && index < len) { - char c = date_str_cursor->ptr[index]; + char c = (char)date_str_cursor->ptr[index]; switch (state) { /* week day abbr is optional. */ @@ -564,7 +339,186 @@ static int s_parse_rfc_822( } } - return error || state != ON_TZ ? AWS_OP_ERR : AWS_OP_SUCCESS; + return error || state != ON_TZ ? 
false : true; +} + +/* Returns true if the next N characters are digits, advancing the string and getting their numeric value */ +static bool s_read_n_digits(struct aws_byte_cursor *str, size_t n, int *out_val) { + int val = 0; + if (str->len < n) { + return false; + } + + for (size_t i = 0; i < n; ++i) { + uint8_t c = str->ptr[i]; + if (aws_isdigit(c)) { + val = val * 10 + (c - '0'); + } else { + return false; + } + } + + aws_byte_cursor_advance(str, n); + *out_val = val; + return true; +} + +/* Returns true if there's 1 more character, advancing the string and getting the character's value. */ +static bool s_read_1_char(struct aws_byte_cursor *str, uint8_t *out_c) { + if (str->len == 0) { + return false; + } + + *out_c = str->ptr[0]; + aws_byte_cursor_advance(str, 1); + return true; +} + +/* Returns true (and advances str) if next character is c */ +static bool s_advance_if_next_char_is(struct aws_byte_cursor *str, uint8_t c) { + if (str->len == 0 || str->ptr[0] != c) { + return false; + } + + aws_byte_cursor_advance(str, 1); + return true; +} + +/* If the (optional) fractional seconds (".123" or ",123") are next, str is advanced. + * Returns false if there was an error */ +static bool s_skip_optional_fractional_seconds(struct aws_byte_cursor *str) { + if (str->len == 0) { + return true; + } + + uint8_t c = str->ptr[0]; + if (c != '.' && c != ',') { + return true; + } + + size_t num_digits = 0; + for (size_t i = 1; i < str->len; ++i) { + if (aws_isdigit(str->ptr[i])) { + ++num_digits; + } else { + break; + } + } + + if (num_digits == 0) { + return false; + } + + aws_byte_cursor_advance(str, 1 + num_digits); + return true; +} + +/* Parses ISO 8601, both extended and basic format are accepted. + * Returns true if successful. 
*/ +static bool s_parse_iso_8601(struct aws_byte_cursor str, struct tm *parsed_time, time_t *seconds_offset) { + AWS_ZERO_STRUCT(*parsed_time); + *seconds_offset = 0; + uint8_t c = 0; + + /* read year */ + if (!s_read_n_digits(&str, 4, &parsed_time->tm_year)) { + return false; + } + parsed_time->tm_year -= 1900; + + /* be lenient, allow date with separator or not */ + bool has_date_separator = s_advance_if_next_char_is(&str, '-'); + + /* read month */ + if (!s_read_n_digits(&str, 2, &parsed_time->tm_mon)) { + return false; + } + parsed_time->tm_mon -= 1; + + if (has_date_separator) { + if (!s_read_1_char(&str, &c) || c != '-') { + return false; + } + } + + /* read month-day */ + if (!s_read_n_digits(&str, 2, &parsed_time->tm_mday)) { + return false; + } + + /* ISO8601 supports date only with no time portion */ + if (str.len == 0) { + return true; + } + + /* followed by T or space (allowed by rfc3339#section-5.6) */ + if (!s_read_1_char(&str, &c) || (tolower(c) != 't' && c != ' ')) { + return false; + } + + /* read hours */ + if (!s_read_n_digits(&str, 2, &parsed_time->tm_hour)) { + return false; + } + + /* be lenient, allow time with separator or not */ + bool has_time_separator = s_advance_if_next_char_is(&str, ':'); + + /* read minutes */ + if (!s_read_n_digits(&str, 2, &parsed_time->tm_min)) { + return false; + } + + if (has_time_separator) { + if (!s_read_1_char(&str, &c) || c != ':') { + return false; + } + } + + /* read seconds */ + if (!s_read_n_digits(&str, 2, &parsed_time->tm_sec)) { + return false; + } + + /* fractional seconds are optional (discard value since tm struct has no corresponding field) */ + if (!s_skip_optional_fractional_seconds(&str)) { + return false; + } + + /* read final Z, or (+/-) indicating there will be an offset */ + if (!s_read_1_char(&str, &c)) { + return false; + } + + if (tolower(c) == 'z') { + /* Success! 
*/ + return true; + } + + if (c != '+' && c != '-') { + return false; + } + + bool negative_offset = c == '-'; + + /* read hours offset */ + int hours_offset = 0; + if (!s_read_n_digits(&str, 2, &hours_offset)) { + return false; + } + + /* be lenient, allow offset with separator or not */ + s_advance_if_next_char_is(&str, ':'); + + /* read minutes offset */ + int minutes_offset = 0; + if (!s_read_n_digits(&str, 2, &minutes_offset)) { + return false; + } + + /* Success! */ + *seconds_offset = (time_t)(hours_offset * 3600 + minutes_offset * 60) * (negative_offset ? -1 : 1); + return true; } int aws_date_time_init_from_str_cursor( @@ -579,22 +533,16 @@ int aws_date_time_init_from_str_cursor( bool successfully_parsed = false; time_t seconds_offset = 0; - if (fmt == AWS_DATE_FORMAT_ISO_8601 || fmt == AWS_DATE_FORMAT_AUTO_DETECT) { - if (!s_parse_iso_8601(date_str_cursor, &parsed_time)) { - dt->utc_assumed = true; - successfully_parsed = true; - } - } - - if (fmt == AWS_DATE_FORMAT_ISO_8601_BASIC || (fmt == AWS_DATE_FORMAT_AUTO_DETECT && !successfully_parsed)) { - if (!s_parse_iso_8601_basic(date_str_cursor, &parsed_time)) { + if (fmt == AWS_DATE_FORMAT_ISO_8601 || fmt == AWS_DATE_FORMAT_ISO_8601_BASIC || + fmt == AWS_DATE_FORMAT_AUTO_DETECT) { + if (s_parse_iso_8601(*date_str_cursor, &parsed_time, &seconds_offset)) { dt->utc_assumed = true; successfully_parsed = true; } } if (fmt == AWS_DATE_FORMAT_RFC822 || (fmt == AWS_DATE_FORMAT_AUTO_DETECT && !successfully_parsed)) { - if (!s_parse_rfc_822(date_str_cursor, &parsed_time, dt)) { + if (s_parse_rfc_822(date_str_cursor, &parsed_time, dt)) { successfully_parsed = true; if (dt->utc_assumed) { diff --git a/contrib/restricted/aws/aws-c-common/source/encoding.c b/contrib/restricted/aws/aws-c-common/source/encoding.c index 9ca5ca4fbaa..bc860aa4b99 100644 --- a/contrib/restricted/aws/aws-c-common/source/encoding.c +++ b/contrib/restricted/aws/aws-c-common/source/encoding.c @@ -185,7 +185,7 @@ int aws_hex_decode(const struct 
aws_byte_cursor *AWS_RESTRICT to_decode, struct /* if the buffer isn't even, prepend a 0 to the buffer. */ if (AWS_UNLIKELY(to_decode->len & 0x01)) { i = 1; - if (s_hex_decode_char_to_int(to_decode->ptr[0], &low_value)) { + if (s_hex_decode_char_to_int((char)to_decode->ptr[0], &low_value)) { return aws_raise_error(AWS_ERROR_INVALID_HEX_STR); } diff --git a/contrib/restricted/aws/aws-c-common/source/error.c b/contrib/restricted/aws/aws-c-common/source/error.c index ad3cec86931..239b318bdf0 100644 --- a/contrib/restricted/aws/aws-c-common/source/error.c +++ b/contrib/restricted/aws/aws-c-common/source/error.c @@ -194,11 +194,19 @@ void aws_unregister_error_info(const struct aws_error_info_list *error_info) { } int aws_translate_and_raise_io_error(int error_no) { + return aws_translate_and_raise_io_error_or(error_no, AWS_ERROR_SYS_CALL_FAILURE); +} + +int aws_translate_and_raise_io_error_or(int error_no, int fallback_aws_error_code) { switch (error_no) { case EINVAL: - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - case ESPIPE: - return aws_raise_error(AWS_ERROR_STREAM_UNSEEKABLE); + /* If useful fallback code provided, raise that instead of AWS_ERROR_INVALID_ARGUMENT, + * which isn't very useful when it bubbles out from deep within some complex system. 
*/ + if (fallback_aws_error_code != AWS_ERROR_SYS_CALL_FAILURE) { + return aws_raise_error(fallback_aws_error_code); + } else { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } case EPERM: case EACCES: return aws_raise_error(AWS_ERROR_NO_PERMISSION); @@ -217,6 +225,6 @@ int aws_translate_and_raise_io_error(int error_no) { case ENOTEMPTY: return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY); default: - return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + return aws_raise_error(fallback_aws_error_code); } } diff --git a/contrib/restricted/aws/aws-c-common/source/external/cJSON.c b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c index d6e49d9a894..9ca6351bf8c 100644 --- a/contrib/restricted/aws/aws-c-common/source/external/cJSON.c +++ b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c @@ -1,25 +1,33 @@ /* -Copyright (c) 2009-2017 Dave Gamble and cJSON contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. */ +/* + * This file has been modified from its original version by Amazon: + * (1) Remove cJSON_GetErrorPtr and global_error as they are not thread-safe. + * (2) Add NOLINTBEGIN/NOLINTEND so clang-tidy ignores file. + * (3) Replace sprintf() with snprintf() to make compilers happier. + */ +/* NOLINTBEGIN */ + /* cJSON */ /* JSON parser in C. */ @@ -56,7 +64,7 @@ THE SOFTWARE. #pragma GCC visibility pop #endif -#include <aws/common/external/cJSON.h> +#include "cJSON.h" /* define our own boolean type */ #ifdef true @@ -86,87 +94,95 @@ THE SOFTWARE. 
#endif typedef struct { - const unsigned char *json; - size_t position; + const unsigned char *json; + size_t position; } error; +#if 0 /* Amazon edit */ +static error global_error = { NULL, 0 }; + +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) +{ + return (const char*) (global_error.json + global_error.position); +} +#endif /* Amazon edit */ CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item) { - if (!cJSON_IsString(item)) - { - return NULL; - } + if (!cJSON_IsString(item)) + { + return NULL; + } - return item->valuestring; + return item->valuestring; } CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item) { - if (!cJSON_IsNumber(item)) - { - return (double) NAN; - } + if (!cJSON_IsNumber(item)) + { + return (double) NAN; + } - return item->valuedouble; + return item->valuedouble; } /* This is a safeguard to prevent copy-pasters from using incompatible C and header files */ -#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 15) - #error cJSON.h and cJSON.c have different versions. Make sure that both have the same. +#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 17) + #error cJSON.h and cJSON.c have different versions. Make sure that both have the same. 
#endif CJSON_PUBLIC(const char*) cJSON_Version(void) { - static char version[15]; - snprintf(version, sizeof(version) / sizeof(char), "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); + static char version[15]; + snprintf(version, sizeof(version), "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); /* Amazon edit */ - return version; + return version; } /* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) { - if ((string1 == NULL) || (string2 == NULL)) - { - return 1; - } + if ((string1 == NULL) || (string2 == NULL)) + { + return 1; + } - if (string1 == string2) - { - return 0; - } + if (string1 == string2) + { + return 0; + } - for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) - { - if (*string1 == '\0') - { - return 0; - } - } + for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) + { + if (*string1 == '\0') + { + return 0; + } + } - return tolower(*string1) - tolower(*string2); + return tolower(*string1) - tolower(*string2); } typedef struct internal_hooks { - void *(CJSON_CDECL *allocate)(size_t size); - void (CJSON_CDECL *deallocate)(void *pointer); - void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); + void *(CJSON_CDECL *allocate)(size_t size); + void (CJSON_CDECL *deallocate)(void *pointer); + void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); } internal_hooks; #if defined(_MSC_VER) /* work around MSVC error C2322: '...' address of dllimport '...' 
is not static */ static void * CJSON_CDECL internal_malloc(size_t size) { - return malloc(size); + return malloc(size); } static void CJSON_CDECL internal_free(void *pointer) { - free(pointer); + free(pointer); } static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) { - return realloc(pointer, size); + return realloc(pointer, size); } #else #define internal_malloc malloc @@ -177,844 +193,852 @@ static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) /* strlen of character literals resolved at compile time */ #define static_strlen(string_literal) (sizeof(string_literal) - sizeof("")) -static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; // NOLINT +static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; -static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) // NOLINT +static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) { - size_t length = 0; - unsigned char *copy = NULL; + size_t length = 0; + unsigned char *copy = NULL; - if (string == NULL) - { - return NULL; - } + if (string == NULL) + { + return NULL; + } - length = strlen((const char*)string) + sizeof(""); - copy = (unsigned char*)hooks->allocate(length); - if (copy == NULL) - { - return NULL; - } - memcpy(copy, string, length); + length = strlen((const char*)string) + sizeof(""); + copy = (unsigned char*)hooks->allocate(length); + if (copy == NULL) + { + return NULL; + } + memcpy(copy, string, length); - return copy; + return copy; } -CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) // NOLINT +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) { - if (hooks == NULL) - { - /* Reset hooks */ - global_hooks.allocate = malloc; - global_hooks.deallocate = free; - global_hooks.reallocate = realloc; - return; - } + if (hooks == NULL) + { + /* Reset hooks */ + global_hooks.allocate = malloc; + 
global_hooks.deallocate = free; + global_hooks.reallocate = realloc; + return; + } - global_hooks.allocate = malloc; - if (hooks->malloc_fn != NULL) - { - global_hooks.allocate = hooks->malloc_fn; - } + global_hooks.allocate = malloc; + if (hooks->malloc_fn != NULL) + { + global_hooks.allocate = hooks->malloc_fn; + } - global_hooks.deallocate = free; - if (hooks->free_fn != NULL) - { - global_hooks.deallocate = hooks->free_fn; - } + global_hooks.deallocate = free; + if (hooks->free_fn != NULL) + { + global_hooks.deallocate = hooks->free_fn; + } - /* use realloc only if both free and malloc are used */ - global_hooks.reallocate = NULL; - if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) - { - global_hooks.reallocate = realloc; - } + /* use realloc only if both free and malloc are used */ + global_hooks.reallocate = NULL; + if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) + { + global_hooks.reallocate = realloc; + } } /* Internal constructor. */ static cJSON *cJSON_New_Item(const internal_hooks * const hooks) { - cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); - if (node) - { - memset(node, '\0', sizeof(cJSON)); - } + cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); + if (node) + { + memset(node, '\0', sizeof(cJSON)); + } - return node; + return node; } /* Delete a cJSON structure. 
*/ CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) { - cJSON *next = NULL; - while (item != NULL) - { - next = item->next; - if (!(item->type & cJSON_IsReference) && (item->child != NULL)) - { - cJSON_Delete(item->child); - } - if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) - { - global_hooks.deallocate(item->valuestring); - } - if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) - { - global_hooks.deallocate(item->string); - } - global_hooks.deallocate(item); - item = next; - } + cJSON *next = NULL; + while (item != NULL) + { + next = item->next; + if (!(item->type & cJSON_IsReference) && (item->child != NULL)) + { + cJSON_Delete(item->child); + } + if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) + { + global_hooks.deallocate(item->valuestring); + } + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + global_hooks.deallocate(item->string); + } + global_hooks.deallocate(item); + item = next; + } } /* get the decimal point character of the current locale */ static unsigned char get_decimal_point(void) { #ifdef ENABLE_LOCALES - struct lconv *lconv = localeconv(); - return (unsigned char) lconv->decimal_point[0]; + struct lconv *lconv = localeconv(); + return (unsigned char) lconv->decimal_point[0]; #else - return '.'; + return '.'; #endif } typedef struct { - const unsigned char *content; - size_t length; - size_t offset; - size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */ - internal_hooks hooks; + const unsigned char *content; + size_t length; + size_t offset; + size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. 
*/ + internal_hooks hooks; } parse_buffer; /* check if the given size is left to read in a given parse buffer (starting with 1) */ -#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) // NOLINT +#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) /* check if the buffer can be accessed at the given index (starting with 0) */ -#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) // NOLINT +#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) #define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) /* get a pointer to the buffer at the position */ #define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) /* Parse the input text to generate a number, and populate the result into item. */ -static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) // NOLINT -{ - double number = 0; - unsigned char *after_end = NULL; - unsigned char number_c_string[64]; - unsigned char decimal_point = get_decimal_point(); // NOLINT - size_t i = 0; - - if ((input_buffer == NULL) || (input_buffer->content == NULL)) - { - return false; // NOLINT - } - - /* copy the number into a temporary buffer and replace '.' 
with the decimal point - * of the current locale (for strtod) - * This also takes care of '\0' not necessarily being available for marking the end of the input */ - for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) - { - switch (buffer_at_offset(input_buffer)[i]) - { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - case '+': - case '-': - case 'e': - case 'E': - number_c_string[i] = buffer_at_offset(input_buffer)[i]; - break; - - case '.': - number_c_string[i] = decimal_point; - break; - - default: - goto loop_end; - } - } +static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) +{ + double number = 0; + unsigned char *after_end = NULL; + unsigned char number_c_string[64]; + unsigned char decimal_point = get_decimal_point(); + size_t i = 0; + + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; + } + + /* copy the number into a temporary buffer and replace '.' 
with the decimal point + * of the current locale (for strtod) + * This also takes care of '\0' not necessarily being available for marking the end of the input */ + for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) + { + switch (buffer_at_offset(input_buffer)[i]) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '+': + case '-': + case 'e': + case 'E': + number_c_string[i] = buffer_at_offset(input_buffer)[i]; + break; + + case '.': + number_c_string[i] = decimal_point; + break; + + default: + goto loop_end; + } + } loop_end: - number_c_string[i] = '\0'; + number_c_string[i] = '\0'; - number = strtod((const char*)number_c_string, (char**)&after_end); - if (number_c_string == after_end) - { - return false; /* parse_error */ // NOLINT - } + number = strtod((const char*)number_c_string, (char**)&after_end); + if (number_c_string == after_end) + { + return false; /* parse_error */ + } - item->valuedouble = number; + item->valuedouble = number; - /* use saturation in case of overflow */ - if (number >= INT_MAX) - { // NOLINT - item->valueint = INT_MAX; - } - else if (number <= (double)INT_MIN) - { - item->valueint = INT_MIN; - } - else - { - item->valueint = (int)number; - } + /* use saturation in case of overflow */ + if (number >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (number <= (double)INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)number; + } - item->type = cJSON_Number; // NOLINT + item->type = cJSON_Number; - input_buffer->offset += (size_t)(after_end - number_c_string); - return true; // NOLINT + input_buffer->offset += (size_t)(after_end - number_c_string); + return true; } /* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ -CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) // NOLINT +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON 
*object, double number) { - if (number >= INT_MAX) - { - object->valueint = INT_MAX; - } - else if (number <= (double)INT_MIN) - { - object->valueint = INT_MIN; - } - else - { - object->valueint = (int)number; - } + if (number >= INT_MAX) + { + object->valueint = INT_MAX; + } + else if (number <= (double)INT_MIN) + { + object->valueint = INT_MIN; + } + else + { + object->valueint = (int)number; + } - return object->valuedouble = number; + return object->valuedouble = number; } CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring) { - char *copy = NULL; - /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */ - if (!(object->type & cJSON_String) || (object->type & cJSON_IsReference)) - { - return NULL; - } - if (strlen(valuestring) <= strlen(object->valuestring)) - { - size_t value_length = strlen(valuestring) + sizeof(""); - memcpy(object->valuestring, valuestring, value_length); - return object->valuestring; - } - copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks); - if (copy == NULL) - { - return NULL; - } - if (object->valuestring != NULL) - { - cJSON_free(object->valuestring); - } - object->valuestring = copy; - - return copy; + char *copy = NULL; + /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */ + if ((object == NULL) || !(object->type & cJSON_String) || (object->type & cJSON_IsReference)) + { + return NULL; + } + /* return NULL if the object is corrupted */ + if (object->valuestring == NULL) + { + return NULL; + } + if (strlen(valuestring) <= strlen(object->valuestring)) + { + strcpy(object->valuestring, valuestring); + return object->valuestring; + } + copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks); + if (copy == NULL) + { + return NULL; + } + if (object->valuestring != NULL) + { + cJSON_free(object->valuestring); + } + object->valuestring = copy; + + return copy; } typedef struct { 
- unsigned char *buffer; - size_t length; - size_t offset; - size_t depth; /* current nesting depth (for formatted printing) */ - cJSON_bool noalloc; - cJSON_bool format; /* is this print a formatted print */ - internal_hooks hooks; + unsigned char *buffer; + size_t length; + size_t offset; + size_t depth; /* current nesting depth (for formatted printing) */ + cJSON_bool noalloc; + cJSON_bool format; /* is this print a formatted print */ + internal_hooks hooks; } printbuffer; /* realloc printbuffer if necessary to have at least "needed" bytes more */ -static unsigned char* ensure(printbuffer * const p, size_t needed) // NOLINT -{ - unsigned char *newbuffer = NULL; - size_t newsize = 0; - - if ((p == NULL) || (p->buffer == NULL)) - { - return NULL; - } - - if ((p->length > 0) && (p->offset >= p->length)) - { - /* make sure that offset is valid */ - return NULL; - } - - if (needed > INT_MAX) - { - /* sizes bigger than INT_MAX are currently not supported */ - return NULL; - } - - needed += p->offset + 1; - if (needed <= p->length) - { - return p->buffer + p->offset; - } - - if (p->noalloc) { - return NULL; - } - - /* calculate new buffer size */ - if (needed > (INT_MAX / 2)) - { - /* overflow of int, use INT_MAX if possible */ - if (needed <= INT_MAX) - { - newsize = INT_MAX; - } - else - { - return NULL; - } - } - else - { - newsize = needed * 2; - } - - if (p->hooks.reallocate != NULL) - { - /* reallocate with realloc if available */ - newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); - if (newbuffer == NULL) - { - p->hooks.deallocate(p->buffer); - p->length = 0; - p->buffer = NULL; - - return NULL; - } - } - else - { - /* otherwise reallocate manually */ - newbuffer = (unsigned char*)p->hooks.allocate(newsize); - if (!newbuffer) - { - p->hooks.deallocate(p->buffer); - p->length = 0; - p->buffer = NULL; - - return NULL; - } - - memcpy(newbuffer, p->buffer, p->offset + 1); - p->hooks.deallocate(p->buffer); - } - p->length = newsize; - p->buffer = 
newbuffer; - - return newbuffer + p->offset; +static unsigned char* ensure(printbuffer * const p, size_t needed) +{ + unsigned char *newbuffer = NULL; + size_t newsize = 0; + + if ((p == NULL) || (p->buffer == NULL)) + { + return NULL; + } + + if ((p->length > 0) && (p->offset >= p->length)) + { + /* make sure that offset is valid */ + return NULL; + } + + if (needed > INT_MAX) + { + /* sizes bigger than INT_MAX are currently not supported */ + return NULL; + } + + needed += p->offset + 1; + if (needed <= p->length) + { + return p->buffer + p->offset; + } + + if (p->noalloc) { + return NULL; + } + + /* calculate new buffer size */ + if (needed > (INT_MAX / 2)) + { + /* overflow of int, use INT_MAX if possible */ + if (needed <= INT_MAX) + { + newsize = INT_MAX; + } + else + { + return NULL; + } + } + else + { + newsize = needed * 2; + } + + if (p->hooks.reallocate != NULL) + { + /* reallocate with realloc if available */ + newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); + if (newbuffer == NULL) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + } + else + { + /* otherwise reallocate manually */ + newbuffer = (unsigned char*)p->hooks.allocate(newsize); + if (!newbuffer) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + + memcpy(newbuffer, p->buffer, p->offset + 1); + p->hooks.deallocate(p->buffer); + } + p->length = newsize; + p->buffer = newbuffer; + + return newbuffer + p->offset; } /* calculate the new length of the string in a printbuffer and update the offset */ -static void update_offset(printbuffer * const buffer) // NOLINT +static void update_offset(printbuffer * const buffer) { - const unsigned char *buffer_pointer = NULL; - if ((buffer == NULL) || (buffer->buffer == NULL)) - { - return; - } - buffer_pointer = buffer->buffer + buffer->offset; + const unsigned char *buffer_pointer = NULL; + if ((buffer == NULL) || (buffer->buffer == NULL)) + { 
+ return; + } + buffer_pointer = buffer->buffer + buffer->offset; - buffer->offset += strlen((const char*)buffer_pointer); + buffer->offset += strlen((const char*)buffer_pointer); } /* securely comparison of floating-point variables */ -static cJSON_bool compare_double(double a, double b) // NOLINT +static cJSON_bool compare_double(double a, double b) { - double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b); - return (fabs(a - b) <= maxVal * DBL_EPSILON); + double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b); + return (fabs(a - b) <= maxVal * DBL_EPSILON); } /* Render the number nicely from the given item into a string. */ -static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) // NOLINT -{ - unsigned char *output_pointer = NULL; - double d = item->valuedouble; - int length = 0; - size_t i = 0; - unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */ - unsigned char decimal_point = get_decimal_point(); // NOLINT - double test = 0.0; - - if (output_buffer == NULL) - { - return false; - } - - /* This checks for NaN and Infinity */ - if (isnan(d) || isinf(d)) - { - length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "null"); - } - else - { - /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ - length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "%1.15g", d); - - /* Check whether the original double can be recovered */ - if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d)) - { - /* If not, print with 17 decimal places of precision */ - length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "%1.17g", d); - } - } - - /* sprintf failed or buffer overrun occurred */ - if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) - { - return false; - } - - /* reserve appropriate space in the output */ - output_pointer = ensure(output_buffer, (size_t)length + 
sizeof("")); - if (output_pointer == NULL) - { - return false; - } - - /* copy the printed number to the output and replace locale - * dependent decimal point with '.' */ - for (i = 0; i < ((size_t)length); i++) - { - if (number_buffer[i] == decimal_point) - { - output_pointer[i] = '.'; - continue; - } - - output_pointer[i] = number_buffer[i]; - } - output_pointer[i] = '\0'; - - output_buffer->offset += (size_t)length; - - return true; +static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + double d = item->valuedouble; + int length = 0; + size_t i = 0; + unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */ + unsigned char decimal_point = get_decimal_point(); + double test = 0.0; + + if (output_buffer == NULL) + { + return false; + } + + /* This checks for NaN and Infinity */ + if (isnan(d) || isinf(d)) + { + length = snprintf((char*)number_buffer, sizeof(number_buffer), "null"); /* Amazon edit */ + } + else if(d == (double)item->valueint) + { + length = snprintf((char*)number_buffer, sizeof(number_buffer), "%d", item->valueint); /* Amazon edit */ + } + else + { + /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ + length = snprintf((char*)number_buffer, sizeof(number_buffer), "%1.15g", d); /* Amazon edit */ + + /* Check whether the original double can be recovered */ + if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d)) + { + /* If not, print with 17 decimal places of precision */ + length = snprintf((char*)number_buffer, sizeof(number_buffer), "%1.17g", d); /* Amazon edit */ + } + } + + /* sprintf failed or buffer overrun occurred */ + if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) + { + return false; + } + + /* reserve appropriate space in the output */ + output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); + if (output_pointer == NULL) + { + 
return false; + } + + /* copy the printed number to the output and replace locale + * dependent decimal point with '.' */ + for (i = 0; i < ((size_t)length); i++) + { + if (number_buffer[i] == decimal_point) + { + output_pointer[i] = '.'; + continue; + } + + output_pointer[i] = number_buffer[i]; + } + output_pointer[i] = '\0'; + + output_buffer->offset += (size_t)length; + + return true; } /* parse 4 digit hexadecimal number */ static unsigned parse_hex4(const unsigned char * const input) { - unsigned int h = 0; - size_t i = 0; - - for (i = 0; i < 4; i++) - { - /* parse digit */ - if ((input[i] >= '0') && (input[i] <= '9')) - { - h += (unsigned int) input[i] - '0'; - } - else if ((input[i] >= 'A') && (input[i] <= 'F')) - { - h += (unsigned int) 10 + input[i] - 'A'; - } - else if ((input[i] >= 'a') && (input[i] <= 'f')) - { - h += (unsigned int) 10 + input[i] - 'a'; - } - else /* invalid */ - { - return 0; - } - - if (i < 3) - { - /* shift left to make place for the next nibble */ - h = h << 4; - } - } - - return h; + unsigned int h = 0; + size_t i = 0; + + for (i = 0; i < 4; i++) + { + /* parse digit */ + if ((input[i] >= '0') && (input[i] <= '9')) + { + h += (unsigned int) input[i] - '0'; + } + else if ((input[i] >= 'A') && (input[i] <= 'F')) + { + h += (unsigned int) 10 + input[i] - 'A'; + } + else if ((input[i] >= 'a') && (input[i] <= 'f')) + { + h += (unsigned int) 10 + input[i] - 'a'; + } + else /* invalid */ + { + return 0; + } + + if (i < 3) + { + /* shift left to make place for the next nibble */ + h = h << 4; + } + } + + return h; } /* converts a UTF-16 literal to UTF-8 -* A literal can be one or two sequences of the form \uXXXX */ + * A literal can be one or two sequences of the form \uXXXX */ static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) { - long unsigned int codepoint = 0; - unsigned int first_code = 0; - const unsigned char *first_sequence = 
input_pointer; - unsigned char utf8_length = 0; - unsigned char utf8_position = 0; - unsigned char sequence_length = 0; - unsigned char first_byte_mark = 0; - - if ((input_end - first_sequence) < 6) - { - /* input ends unexpectedly */ - goto fail; - } - - /* get the first utf16 sequence */ - first_code = parse_hex4(first_sequence + 2); - - /* check that the code is valid */ - if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) - { - goto fail; - } - - /* UTF16 surrogate pair */ - if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) - { - const unsigned char *second_sequence = first_sequence + 6; - unsigned int second_code = 0; - sequence_length = 12; /* \uXXXX\uXXXX */ - - if ((input_end - second_sequence) < 6) - { - /* input ends unexpectedly */ - goto fail; - } - - if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) - { - /* missing second half of the surrogate pair */ - goto fail; - } - - /* get the second utf16 sequence */ - second_code = parse_hex4(second_sequence + 2); - /* check that the code is valid */ - if ((second_code < 0xDC00) || (second_code > 0xDFFF)) - { - /* invalid second half of the surrogate pair */ - goto fail; - } - - - /* calculate the unicode codepoint from the surrogate pair */ - codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); - } - else - { - sequence_length = 6; /* \uXXXX */ - codepoint = first_code; - } - - /* encode as UTF-8 - * takes at maximum 4 bytes to encode: - * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ - if (codepoint < 0x80) - { - /* normal ascii, encoding 0xxxxxxx */ - utf8_length = 1; - } - else if (codepoint < 0x800) - { - /* two bytes, encoding 110xxxxx 10xxxxxx */ - utf8_length = 2; - first_byte_mark = 0xC0; /* 11000000 */ - } - else if (codepoint < 0x10000) - { - /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ - utf8_length = 3; - first_byte_mark = 0xE0; /* 11100000 */ - } - else if (codepoint <= 0x10FFFF) - { - /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx 
*/ - utf8_length = 4; - first_byte_mark = 0xF0; /* 11110000 */ - } - else - { - /* invalid unicode codepoint */ - goto fail; - } - - /* encode as utf8 */ - for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) - { - /* 10xxxxxx */ - (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); - codepoint >>= 6; - } - /* encode first byte */ - if (utf8_length > 1) - { - (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); - } - else - { - (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); - } - - *output_pointer += utf8_length; - - return sequence_length; + long unsigned int codepoint = 0; + unsigned int first_code = 0; + const unsigned char *first_sequence = input_pointer; + unsigned char utf8_length = 0; + unsigned char utf8_position = 0; + unsigned char sequence_length = 0; + unsigned char first_byte_mark = 0; + + if ((input_end - first_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + /* get the first utf16 sequence */ + first_code = parse_hex4(first_sequence + 2); + + /* check that the code is valid */ + if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) + { + goto fail; + } + + /* UTF16 surrogate pair */ + if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) + { + const unsigned char *second_sequence = first_sequence + 6; + unsigned int second_code = 0; + sequence_length = 12; /* \uXXXX\uXXXX */ + + if ((input_end - second_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) + { + /* missing second half of the surrogate pair */ + goto fail; + } + + /* get the second utf16 sequence */ + second_code = parse_hex4(second_sequence + 2); + /* check that the code is valid */ + if ((second_code < 0xDC00) || (second_code > 0xDFFF)) + { + /* invalid second half of the surrogate pair */ + goto fail; + } + + + /* calculate the unicode codepoint from the surrogate pair */ + 
codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); + } + else + { + sequence_length = 6; /* \uXXXX */ + codepoint = first_code; + } + + /* encode as UTF-8 + * takes at maximum 4 bytes to encode: + * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint < 0x80) + { + /* normal ascii, encoding 0xxxxxxx */ + utf8_length = 1; + } + else if (codepoint < 0x800) + { + /* two bytes, encoding 110xxxxx 10xxxxxx */ + utf8_length = 2; + first_byte_mark = 0xC0; /* 11000000 */ + } + else if (codepoint < 0x10000) + { + /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ + utf8_length = 3; + first_byte_mark = 0xE0; /* 11100000 */ + } + else if (codepoint <= 0x10FFFF) + { + /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ + utf8_length = 4; + first_byte_mark = 0xF0; /* 11110000 */ + } + else + { + /* invalid unicode codepoint */ + goto fail; + } + + /* encode as utf8 */ + for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) + { + /* 10xxxxxx */ + (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); + codepoint >>= 6; + } + /* encode first byte */ + if (utf8_length > 1) + { + (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); + } + else + { + (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); + } + + *output_pointer += utf8_length; + + return sequence_length; fail: - return 0; + return 0; } /* Parse the input text into an unescaped cinput, and populate item. 
*/ -static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) // NOLINT -{ - const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; - const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; - unsigned char *output_pointer = NULL; - unsigned char *output = NULL; - - /* not a string */ - if (buffer_at_offset(input_buffer)[0] != '\"') - { - goto fail; - } - - { - /* calculate approximate size of the output (overestimate) */ - size_t allocation_length = 0; - size_t skipped_bytes = 0; - while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) - { - /* is escape sequence */ - if (input_end[0] == '\\') - { - if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) - { - /* prevent buffer overflow when last input character is a backslash */ - goto fail; - } - skipped_bytes++; - input_end++; - } - input_end++; - } - if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) - { - goto fail; /* string ended unexpectedly */ - } - - /* This is at most how much we need for the output */ - allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; - output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); - if (output == NULL) - { - goto fail; /* allocation failure */ - } - } - - output_pointer = output; - /* loop through the string literal */ - while (input_pointer < input_end) - { - if (*input_pointer != '\\') - { - *output_pointer++ = *input_pointer++; - } - /* escape sequence */ - else - { - unsigned char sequence_length = 2; - if ((input_end - input_pointer) < 1) - { - goto fail; - } - - switch (input_pointer[1]) - { - case 'b': - *output_pointer++ = '\b'; - break; - case 'f': - *output_pointer++ = '\f'; - break; - case 'n': - *output_pointer++ = '\n'; - break; - case 'r': - *output_pointer++ = '\r'; - break; - case 't': - *output_pointer++ = '\t'; - 
break; - case '\"': - case '\\': - case '/': - *output_pointer++ = input_pointer[1]; - break; - - /* UTF-16 literal */ - case 'u': - sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); - if (sequence_length == 0) - { - /* failed to convert UTF16-literal to UTF-8 */ - goto fail; - } - break; - - default: - goto fail; - } - input_pointer += sequence_length; - } - } - - /* zero terminate the output */ - *output_pointer = '\0'; - - item->type = cJSON_String; - item->valuestring = (char*)output; - - input_buffer->offset = (size_t) (input_end - input_buffer->content); - input_buffer->offset++; - - return true; +static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) +{ + const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; + const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; + unsigned char *output_pointer = NULL; + unsigned char *output = NULL; + + /* not a string */ + if (buffer_at_offset(input_buffer)[0] != '\"') + { + goto fail; + } + + { + /* calculate approximate size of the output (overestimate) */ + size_t allocation_length = 0; + size_t skipped_bytes = 0; + while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) + { + /* is escape sequence */ + if (input_end[0] == '\\') + { + if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) + { + /* prevent buffer overflow when last input character is a backslash */ + goto fail; + } + skipped_bytes++; + input_end++; + } + input_end++; + } + if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) + { + goto fail; /* string ended unexpectedly */ + } + + /* This is at most how much we need for the output */ + allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; + output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); + if (output == NULL) + { + goto 
fail; /* allocation failure */ + } + } + + output_pointer = output; + /* loop through the string literal */ + while (input_pointer < input_end) + { + if (*input_pointer != '\\') + { + *output_pointer++ = *input_pointer++; + } + /* escape sequence */ + else + { + unsigned char sequence_length = 2; + if ((input_end - input_pointer) < 1) + { + goto fail; + } + + switch (input_pointer[1]) + { + case 'b': + *output_pointer++ = '\b'; + break; + case 'f': + *output_pointer++ = '\f'; + break; + case 'n': + *output_pointer++ = '\n'; + break; + case 'r': + *output_pointer++ = '\r'; + break; + case 't': + *output_pointer++ = '\t'; + break; + case '\"': + case '\\': + case '/': + *output_pointer++ = input_pointer[1]; + break; + + /* UTF-16 literal */ + case 'u': + sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); + if (sequence_length == 0) + { + /* failed to convert UTF16-literal to UTF-8 */ + goto fail; + } + break; + + default: + goto fail; + } + input_pointer += sequence_length; + } + } + + /* zero terminate the output */ + *output_pointer = '\0'; + + item->type = cJSON_String; + item->valuestring = (char*)output; + + input_buffer->offset = (size_t) (input_end - input_buffer->content); + input_buffer->offset++; + + return true; fail: - if (output != NULL) - { - input_buffer->hooks.deallocate(output); - } + if (output != NULL) + { + input_buffer->hooks.deallocate(output); + } - if (input_pointer != NULL) - { - input_buffer->offset = (size_t)(input_pointer - input_buffer->content); - } + if (input_pointer != NULL) + { + input_buffer->offset = (size_t)(input_pointer - input_buffer->content); + } - return false; + return false; } /* Render the cstring provided to an escaped version that can be printed. 
*/ -static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) // NOLINT -{ - const unsigned char *input_pointer = NULL; - unsigned char *output = NULL; - unsigned char *output_pointer = NULL; - size_t output_length = 0; - /* numbers of additional characters needed for escaping */ - size_t escape_characters = 0; - - if (output_buffer == NULL) - { - return false; - } - - /* empty string */ - if (input == NULL) - { - output = ensure(output_buffer, sizeof("\"\"")); - if (output == NULL) - { - return false; - } - memcpy(output, "\"\"", 3); /* NOLINT */ - - return true; - } - - /* set "flag" to 1 if something needs to be escaped */ - for (input_pointer = input; *input_pointer; input_pointer++) - { - switch (*input_pointer) - { - case '\"': - case '\\': - case '\b': - case '\f': - case '\n': - case '\r': - case '\t': - /* one character escape sequence */ - escape_characters++; - break; - default: - if (*input_pointer < 32) - { - /* UTF-16 escape sequence uXXXX */ - escape_characters += 5; - } - break; - } - } - output_length = (size_t)(input_pointer - input) + escape_characters; - - output = ensure(output_buffer, output_length + sizeof("\"\"")); - if (output == NULL) - { - return false; - } - - /* no characters have to be escaped */ - if (escape_characters == 0) - { - output[0] = '\"'; - memcpy(output + 1, input, output_length); - output[output_length + 1] = '\"'; - output[output_length + 2] = '\0'; - - return true; - } - - output[0] = '\"'; - output_pointer = output + 1; - /* copy the string */ - for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) - { - if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) - { - /* normal character, copy */ - *output_pointer = *input_pointer; - } - else - { - /* character needs to be escaped */ - *output_pointer++ = '\\'; - switch (*input_pointer) - { - case '\\': - *output_pointer = '\\'; - break; - case '\"': - 
*output_pointer = '\"'; - break; - case '\b': - *output_pointer = 'b'; - break; - case '\f': - *output_pointer = 'f'; - break; - case '\n': - *output_pointer = 'n'; - break; - case '\r': - *output_pointer = 'r'; - break; - case '\t': - *output_pointer = 't'; - break; - default: - /* escape and print as unicode codepoint */ - snprintf((char*)output_pointer, 6 * sizeof(char), "u%04x", *input_pointer); - output_pointer += 4; - break; - } - } - } - output[output_length + 1] = '\"'; - output[output_length + 2] = '\0'; - - return true; +static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) +{ + const unsigned char *input_pointer = NULL; + unsigned char *output = NULL; + unsigned char *output_pointer = NULL; + size_t output_length = 0; + /* numbers of additional characters needed for escaping */ + size_t escape_characters = 0; + + if (output_buffer == NULL) + { + return false; + } + + /* empty string */ + if (input == NULL) + { + output = ensure(output_buffer, sizeof("\"\"")); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "\"\""); + + return true; + } + + /* set "flag" to 1 if something needs to be escaped */ + for (input_pointer = input; *input_pointer; input_pointer++) + { + switch (*input_pointer) + { + case '\"': + case '\\': + case '\b': + case '\f': + case '\n': + case '\r': + case '\t': + /* one character escape sequence */ + escape_characters++; + break; + default: + if (*input_pointer < 32) + { + /* UTF-16 escape sequence uXXXX */ + escape_characters += 5; + } + break; + } + } + output_length = (size_t)(input_pointer - input) + escape_characters; + + output = ensure(output_buffer, output_length + sizeof("\"\"")); + if (output == NULL) + { + return false; + } + + /* no characters have to be escaped */ + if (escape_characters == 0) + { + output[0] = '\"'; + memcpy(output + 1, input, output_length); + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; + } + 
+ output[0] = '\"'; + output_pointer = output + 1; + /* copy the string */ + for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) + { + if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) + { + /* normal character, copy */ + *output_pointer = *input_pointer; + } + else + { + /* character needs to be escaped */ + *output_pointer++ = '\\'; + switch (*input_pointer) + { + case '\\': + *output_pointer = '\\'; + break; + case '\"': + *output_pointer = '\"'; + break; + case '\b': + *output_pointer = 'b'; + break; + case '\f': + *output_pointer = 'f'; + break; + case '\n': + *output_pointer = 'n'; + break; + case '\r': + *output_pointer = 'r'; + break; + case '\t': + *output_pointer = 't'; + break; + default: + /* escape and print as unicode codepoint */ + snprintf((char*)output_pointer, 6, "u%04x", *input_pointer); /* Amazon edit */ + output_pointer += 4; + break; + } + } + } + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; } /* Invoke print_string_ptr (which is useful) on an item. */ static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) { - return print_string_ptr((unsigned char*)item->valuestring, p); + return print_string_ptr((unsigned char*)item->valuestring, p); } /* Predeclare these prototypes. 
*/ @@ -1026,2077 +1050,2096 @@ static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_bu static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); /* Utility to jump whitespace and cr/lf */ -static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) // NOLINT +static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) { - if ((buffer == NULL) || (buffer->content == NULL)) - { - return NULL; - } + if ((buffer == NULL) || (buffer->content == NULL)) + { + return NULL; + } - if (cannot_access_at_index(buffer, 0)) - { - return buffer; - } + if (cannot_access_at_index(buffer, 0)) + { + return buffer; + } - while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) - { - buffer->offset++; - } + while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) + { + buffer->offset++; + } - if (buffer->offset == buffer->length) - { - buffer->offset--; - } + if (buffer->offset == buffer->length) + { + buffer->offset--; + } - return buffer; + return buffer; } /* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */ -static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) // NOLINT +static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) { - if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) - { - return NULL; - } + if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) + { + return NULL; + } - if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) - { - buffer->offset += 3; - } + if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) + { + buffer->offset += 3; + } - return buffer; + return buffer; } CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) { - size_t buffer_length; + size_t 
buffer_length; - if (NULL == value) - { - return NULL; - } + if (NULL == value) + { + return NULL; + } - /* Adding null character size due to require_null_terminated. */ - buffer_length = strlen(value) + sizeof(""); + /* Adding null character size due to require_null_terminated. */ + buffer_length = strlen(value) + sizeof(""); - return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated); + return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated); } /* Parse an object - create a new root, and populate. */ CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated) { - parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; - cJSON *item = NULL; - - if (value == NULL || 0 == buffer_length) - { - goto fail; - } - - buffer.content = (const unsigned char*)value; - buffer.length = buffer_length; - buffer.offset = 0; - buffer.hooks = global_hooks; - - item = cJSON_New_Item(&global_hooks); - if (item == NULL) /* memory fail */ - { - goto fail; - } - - if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) - { - /* parse failure. ep is set. 
*/ - goto fail; - } - - /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ - if (require_null_terminated) - { - buffer_skip_whitespace(&buffer); - if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') - { - goto fail; - } - } - if (return_parse_end) - { - *return_parse_end = (const char*)buffer_at_offset(&buffer); - } - - return item; + parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; + cJSON *item = NULL; + +#if 0 /* Amazon edit */ + /* reset error position */ + global_error.json = NULL; + global_error.position = 0; +#endif /* Amazon edit */ + + if (value == NULL || 0 == buffer_length) + { + goto fail; + } + + buffer.content = (const unsigned char*)value; + buffer.length = buffer_length; + buffer.offset = 0; + buffer.hooks = global_hooks; + + item = cJSON_New_Item(&global_hooks); + if (item == NULL) /* memory fail */ + { + goto fail; + } + + if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) + { + /* parse failure. ep is set. 
*/ + goto fail; + } + + /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ + if (require_null_terminated) + { + buffer_skip_whitespace(&buffer); + if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') + { + goto fail; + } + } + if (return_parse_end) + { + *return_parse_end = (const char*)buffer_at_offset(&buffer); + } + + return item; fail: - if (item != NULL) - { - cJSON_Delete(item); - } - - if (value != NULL) - { - error local_error; - local_error.json = (const unsigned char*)value; - local_error.position = 0; - - if (buffer.offset < buffer.length) - { - local_error.position = buffer.offset; - } - else if (buffer.length > 0) - { - local_error.position = buffer.length - 1; - } - - if (return_parse_end != NULL) - { - *return_parse_end = (const char*)local_error.json + local_error.position; - } - - } - - return NULL; + if (item != NULL) + { + cJSON_Delete(item); + } + + if (value != NULL) + { + error local_error; + local_error.json = (const unsigned char*)value; + local_error.position = 0; + + if (buffer.offset < buffer.length) + { + local_error.position = buffer.offset; + } + else if (buffer.length > 0) + { + local_error.position = buffer.length - 1; + } + + if (return_parse_end != NULL) + { + *return_parse_end = (const char*)local_error.json + local_error.position; + } + +#if 0 /* Amazon edit */ + global_error = local_error; +#endif /* Amazon edit */ + } + + return NULL; } /* Default options for cJSON_Parse */ CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) { - return cJSON_ParseWithOpts(value, 0, 0); + return cJSON_ParseWithOpts(value, 0, 0); } CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length) { - return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0); + return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0); } #define cjson_min(a, b) (((a) < (b)) ? 
(a) : (b)) static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) { - static const size_t default_buffer_size = 256; - printbuffer buffer[1]; - unsigned char *printed = NULL; - - memset(buffer, 0, sizeof(buffer)); - - /* create buffer */ - buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size); - buffer->length = default_buffer_size; - buffer->format = format; - buffer->hooks = *hooks; - if (buffer->buffer == NULL) - { - goto fail; - } - - /* print the value */ - if (!print_value(item, buffer)) - { - goto fail; - } - update_offset(buffer); - - /* check if reallocate is available */ - if (hooks->reallocate != NULL) - { - printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1); - if (printed == NULL) { - goto fail; - } - buffer->buffer = NULL; - } - else /* otherwise copy the JSON over to a new buffer */ - { - printed = (unsigned char*) hooks->allocate(buffer->offset + 1); - if (printed == NULL) - { - goto fail; - } - memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); - printed[buffer->offset] = '\0'; /* just to be sure */ - - /* free the buffer */ - hooks->deallocate(buffer->buffer); - } - - return printed; + static const size_t default_buffer_size = 256; + printbuffer buffer[1]; + unsigned char *printed = NULL; + + memset(buffer, 0, sizeof(buffer)); + + /* create buffer */ + buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size); + buffer->length = default_buffer_size; + buffer->format = format; + buffer->hooks = *hooks; + if (buffer->buffer == NULL) + { + goto fail; + } + + /* print the value */ + if (!print_value(item, buffer)) + { + goto fail; + } + update_offset(buffer); + + /* check if reallocate is available */ + if (hooks->reallocate != NULL) + { + printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1); + if (printed == NULL) { + goto fail; + } + buffer->buffer = NULL; + } + else /* otherwise 
copy the JSON over to a new buffer */ + { + printed = (unsigned char*) hooks->allocate(buffer->offset + 1); + if (printed == NULL) + { + goto fail; + } + memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); + printed[buffer->offset] = '\0'; /* just to be sure */ + + /* free the buffer */ + hooks->deallocate(buffer->buffer); + } + + return printed; fail: - if (buffer->buffer != NULL) - { - hooks->deallocate(buffer->buffer); - } + if (buffer->buffer != NULL) + { + hooks->deallocate(buffer->buffer); + } - if (printed != NULL) - { - hooks->deallocate(printed); - } + if (printed != NULL) + { + hooks->deallocate(printed); + } - return NULL; + return NULL; } /* Render a cJSON item/entity/structure to text. */ CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) { - return (char*)print(item, true, &global_hooks); + return (char*)print(item, true, &global_hooks); } CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) { - return (char*)print(item, false, &global_hooks); + return (char*)print(item, false, &global_hooks); } CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) { - printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; - if (prebuffer < 0) - { - return NULL; - } + if (prebuffer < 0) + { + return NULL; + } - p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); - if (!p.buffer) - { - return NULL; - } + p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); + if (!p.buffer) + { + return NULL; + } - p.length = (size_t)prebuffer; - p.offset = 0; - p.noalloc = false; - p.format = fmt; - p.hooks = global_hooks; + p.length = (size_t)prebuffer; + p.offset = 0; + p.noalloc = false; + p.format = fmt; + p.hooks = global_hooks; - if (!print_value(item, &p)) - { - global_hooks.deallocate(p.buffer); - return NULL; - } + if (!print_value(item, &p)) + { + global_hooks.deallocate(p.buffer); + return NULL; + } - return 
(char*)p.buffer; + return (char*)p.buffer; } CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format) { - printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; - if ((length < 0) || (buffer == NULL)) - { - return false; - } + if ((length < 0) || (buffer == NULL)) + { + return false; + } - p.buffer = (unsigned char*)buffer; - p.length = (size_t)length; - p.offset = 0; - p.noalloc = true; - p.format = format; - p.hooks = global_hooks; + p.buffer = (unsigned char*)buffer; + p.length = (size_t)length; + p.offset = 0; + p.noalloc = true; + p.format = format; + p.hooks = global_hooks; - return print_value(item, &p); + return print_value(item, &p); } /* Parser core - when encountering text, process appropriately. */ static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) { - if ((input_buffer == NULL) || (input_buffer->content == NULL)) - { - return false; /* no input */ - } - - /* parse the different types of values */ - /* null */ - if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) - { - item->type = cJSON_NULL; - input_buffer->offset += 4; - return true; - } - /* false */ - if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) - { - item->type = cJSON_False; - input_buffer->offset += 5; - return true; - } - /* true */ - if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) - { - item->type = cJSON_True; - item->valueint = 1; - input_buffer->offset += 4; - return true; - } - /* string */ - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) - { - return parse_string(item, input_buffer); - } - /* number */ - if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') 
&& (buffer_at_offset(input_buffer)[0] <= '9')))) - { - return parse_number(item, input_buffer); - } - /* array */ - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) - { - return parse_array(item, input_buffer); - } - /* object */ - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) - { - return parse_object(item, input_buffer); - } - - return false; + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; /* no input */ + } + + /* parse the different types of values */ + /* null */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) + { + item->type = cJSON_NULL; + input_buffer->offset += 4; + return true; + } + /* false */ + if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) + { + item->type = cJSON_False; + input_buffer->offset += 5; + return true; + } + /* true */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) + { + item->type = cJSON_True; + item->valueint = 1; + input_buffer->offset += 4; + return true; + } + /* string */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) + { + return parse_string(item, input_buffer); + } + /* number */ + if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) + { + return parse_number(item, input_buffer); + } + /* array */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) + { + return parse_array(item, input_buffer); + } + /* object */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) + { + return parse_object(item, input_buffer); + } + + return false; } /* Render a value to text. 
*/ static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) { - unsigned char *output = NULL; - - if ((item == NULL) || (output_buffer == NULL)) - { - return false; - } - - switch ((item->type) & 0xFF) - { - case cJSON_NULL: - output = ensure(output_buffer, 5); - if (output == NULL) - { - return false; - } - memcpy(output, "null", 5); /* NOLINT */ - return true; - - case cJSON_False: - output = ensure(output_buffer, 6); - if (output == NULL) - { - return false; - } - memcpy(output, "false", 6); /* NOLINT */ - return true; - - case cJSON_True: - output = ensure(output_buffer, 5); - if (output == NULL) - { - return false; - } - memcpy(output, "true", 5); /* NOLINT */ - return true; - - case cJSON_Number: - return print_number(item, output_buffer); - - case cJSON_Raw: - { - size_t raw_length = 0; - if (item->valuestring == NULL) - { - return false; - } - - raw_length = strlen(item->valuestring) + sizeof(""); - output = ensure(output_buffer, raw_length); - if (output == NULL) - { - return false; - } - memcpy(output, item->valuestring, raw_length); - return true; - } - - case cJSON_String: - return print_string(item, output_buffer); - - case cJSON_Array: - return print_array(item, output_buffer); - - case cJSON_Object: - return print_object(item, output_buffer); - - default: - return false; - } + unsigned char *output = NULL; + + if ((item == NULL) || (output_buffer == NULL)) + { + return false; + } + + switch ((item->type) & 0xFF) + { + case cJSON_NULL: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "null"); + return true; + + case cJSON_False: + output = ensure(output_buffer, 6); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "false"); + return true; + + case cJSON_True: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "true"); + return true; + + case cJSON_Number: + return print_number(item, 
output_buffer); + + case cJSON_Raw: + { + size_t raw_length = 0; + if (item->valuestring == NULL) + { + return false; + } + + raw_length = strlen(item->valuestring) + sizeof(""); + output = ensure(output_buffer, raw_length); + if (output == NULL) + { + return false; + } + memcpy(output, item->valuestring, raw_length); + return true; + } + + case cJSON_String: + return print_string(item, output_buffer); + + case cJSON_Array: + return print_array(item, output_buffer); + + case cJSON_Object: + return print_object(item, output_buffer); + + default: + return false; + } } /* Build an array from input text. */ static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer) { - cJSON *head = NULL; /* head of the linked list */ - cJSON *current_item = NULL; - - if (input_buffer->depth >= CJSON_NESTING_LIMIT) - { - return false; /* to deeply nested */ - } - input_buffer->depth++; - - if (buffer_at_offset(input_buffer)[0] != '[') - { - /* not an array */ - goto fail; - } - - input_buffer->offset++; - buffer_skip_whitespace(input_buffer); - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) - { - /* empty array */ - goto success; - } - - /* check if we skipped to the end of the buffer */ - if (cannot_access_at_index(input_buffer, 0)) - { - input_buffer->offset--; - goto fail; - } - - /* step back to character in front of the first element */ - input_buffer->offset--; - /* loop through the comma separated array elements */ - do - { - /* allocate next item */ - cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); - if (new_item == NULL) - { - goto fail; /* allocation failure */ - } - - /* attach next item to list */ - if (head == NULL) - { - /* start the linked list */ - current_item = head = new_item; - } - else - { - /* add to the end and advance */ - current_item->next = new_item; - new_item->prev = current_item; - current_item = new_item; - } - - /* parse next value */ - input_buffer->offset++; - 
buffer_skip_whitespace(input_buffer); - if (!parse_value(current_item, input_buffer)) - { - goto fail; /* failed to parse value */ - } - buffer_skip_whitespace(input_buffer); - } - while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); - - if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') - { - goto fail; /* expected end of array */ - } + cJSON *head = NULL; /* head of the linked list */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (buffer_at_offset(input_buffer)[0] != '[') + { + /* not an array */ + goto fail; + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) + { + /* empty array */ + goto success; + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse next value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if 
(cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') + { + goto fail; /* expected end of array */ + } success: - input_buffer->depth--; + input_buffer->depth--; - if (head != NULL) { - head->prev = current_item; - } + if (head != NULL) { + head->prev = current_item; + } - item->type = cJSON_Array; - item->child = head; + item->type = cJSON_Array; + item->child = head; - input_buffer->offset++; + input_buffer->offset++; - return true; + return true; fail: - if (head != NULL) - { - cJSON_Delete(head); - } + if (head != NULL) + { + cJSON_Delete(head); + } - return false; + return false; } /* Render an array to text */ static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer) { - unsigned char *output_pointer = NULL; - size_t length = 0; - cJSON *current_element = item->child; - - if (output_buffer == NULL) - { - return false; - } - - /* Compose the output array. */ - /* opening square bracket */ - output_pointer = ensure(output_buffer, 1); - if (output_pointer == NULL) - { - return false; - } - - *output_pointer = '['; - output_buffer->offset++; - output_buffer->depth++; - - while (current_element != NULL) - { - if (!print_value(current_element, output_buffer)) - { - return false; - } - update_offset(output_buffer); - if (current_element->next) - { - length = (size_t) (output_buffer->format ? 
2 : 1); - output_pointer = ensure(output_buffer, length + 1); - if (output_pointer == NULL) - { - return false; - } - *output_pointer++ = ','; - if(output_buffer->format) - { - *output_pointer++ = ' '; - } - *output_pointer = '\0'; - output_buffer->offset += length; - } - current_element = current_element->next; - } - - output_pointer = ensure(output_buffer, 2); - if (output_pointer == NULL) - { - return false; - } - *output_pointer++ = ']'; - *output_pointer = '\0'; - output_buffer->depth--; - - return true; + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_element = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output array. */ + /* opening square bracket */ + output_pointer = ensure(output_buffer, 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer = '['; + output_buffer->offset++; + output_buffer->depth++; + + while (current_element != NULL) + { + if (!print_value(current_element, output_buffer)) + { + return false; + } + update_offset(output_buffer); + if (current_element->next) + { + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ','; + if(output_buffer->format) + { + *output_pointer++ = ' '; + } + *output_pointer = '\0'; + output_buffer->offset += length; + } + current_element = current_element->next; + } + + output_pointer = ensure(output_buffer, 2); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ']'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; } /* Build an object from the text. 
*/ static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer) { - cJSON *head = NULL; /* linked list head */ - cJSON *current_item = NULL; - - if (input_buffer->depth >= CJSON_NESTING_LIMIT) - { - return false; /* to deeply nested */ - } - input_buffer->depth++; - - if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) - { - goto fail; /* not an object */ - } - - input_buffer->offset++; - buffer_skip_whitespace(input_buffer); - if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) - { - goto success; /* empty object */ - } - - /* check if we skipped to the end of the buffer */ - if (cannot_access_at_index(input_buffer, 0)) - { - input_buffer->offset--; - goto fail; - } - - /* step back to character in front of the first element */ - input_buffer->offset--; - /* loop through the comma separated array elements */ - do - { - /* allocate next item */ - cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); - if (new_item == NULL) - { - goto fail; /* allocation failure */ - } - - /* attach next item to list */ - if (head == NULL) - { - /* start the linked list */ - current_item = head = new_item; - } - else - { - /* add to the end and advance */ - current_item->next = new_item; - new_item->prev = current_item; - current_item = new_item; - } - - /* parse the name of the child */ - input_buffer->offset++; - buffer_skip_whitespace(input_buffer); - if (!parse_string(current_item, input_buffer)) - { - goto fail; /* failed to parse name */ - } - buffer_skip_whitespace(input_buffer); - - /* swap valuestring and string, because we parsed the name */ - current_item->string = current_item->valuestring; - current_item->valuestring = NULL; - - if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) - { - goto fail; /* invalid object */ - } - - /* parse the value */ - input_buffer->offset++; - buffer_skip_whitespace(input_buffer); - if 
(!parse_value(current_item, input_buffer)) - { - goto fail; /* failed to parse value */ - } - buffer_skip_whitespace(input_buffer); - } - while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); - - if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) - { - goto fail; /* expected end of object */ - } + cJSON *head = NULL; /* linked list head */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) + { + goto fail; /* not an object */ + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) + { + goto success; /* empty object */ + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse the name of the child */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_string(current_item, input_buffer)) + { + goto fail; /* failed to parse name */ + } + buffer_skip_whitespace(input_buffer); + + /* swap valuestring and string, because we parsed the name */ + current_item->string = current_item->valuestring; 
+ current_item->valuestring = NULL; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) + { + goto fail; /* invalid object */ + } + + /* parse the value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) + { + goto fail; /* expected end of object */ + } success: - input_buffer->depth--; + input_buffer->depth--; - if (head != NULL) { - head->prev = current_item; - } + if (head != NULL) { + head->prev = current_item; + } - item->type = cJSON_Object; - item->child = head; + item->type = cJSON_Object; + item->child = head; - input_buffer->offset++; - return true; + input_buffer->offset++; + return true; fail: - if (head != NULL) - { - cJSON_Delete(head); - } + if (head != NULL) + { + cJSON_Delete(head); + } - return false; + return false; } /* Render an object to text. */ static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer) { - unsigned char *output_pointer = NULL; - size_t length = 0; - cJSON *current_item = item->child; - - if (output_buffer == NULL) - { - return false; - } - - /* Compose the output: */ - length = (size_t) (output_buffer->format ? 
2 : 1); /* fmt: {\n */ - output_pointer = ensure(output_buffer, length + 1); - if (output_pointer == NULL) - { - return false; - } - - *output_pointer++ = '{'; - output_buffer->depth++; - if (output_buffer->format) - { - *output_pointer++ = '\n'; - } - output_buffer->offset += length; - - while (current_item) - { - if (output_buffer->format) - { - size_t i; - output_pointer = ensure(output_buffer, output_buffer->depth); - if (output_pointer == NULL) - { - return false; - } - for (i = 0; i < output_buffer->depth; i++) - { - *output_pointer++ = '\t'; - } - output_buffer->offset += output_buffer->depth; - } - - /* print key */ - if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) - { - return false; - } - update_offset(output_buffer); - - length = (size_t) (output_buffer->format ? 2 : 1); - output_pointer = ensure(output_buffer, length); - if (output_pointer == NULL) - { - return false; - } - *output_pointer++ = ':'; - if (output_buffer->format) - { - *output_pointer++ = '\t'; - } - output_buffer->offset += length; - - /* print value */ - if (!print_value(current_item, output_buffer)) - { - return false; - } - update_offset(output_buffer); - - /* print comma if not last */ - length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0)); - output_pointer = ensure(output_buffer, length + 1); - if (output_pointer == NULL) - { - return false; - } - if (current_item->next) - { - *output_pointer++ = ','; - } - - if (output_buffer->format) - { - *output_pointer++ = '\n'; - } - *output_pointer = '\0'; - output_buffer->offset += length; - - current_item = current_item->next; - } - - output_pointer = ensure(output_buffer, output_buffer->format ? 
(output_buffer->depth + 1) : 2); - if (output_pointer == NULL) - { - return false; - } - if (output_buffer->format) - { - size_t i; - for (i = 0; i < (output_buffer->depth - 1); i++) - { - *output_pointer++ = '\t'; - } - } - *output_pointer++ = '}'; - *output_pointer = '\0'; - output_buffer->depth--; - - return true; + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_item = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output: */ + length = (size_t) (output_buffer->format ? 2 : 1); /* fmt: {\n */ + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer++ = '{'; + output_buffer->depth++; + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + output_buffer->offset += length; + + while (current_item) + { + if (output_buffer->format) + { + size_t i; + output_pointer = ensure(output_buffer, output_buffer->depth); + if (output_pointer == NULL) + { + return false; + } + for (i = 0; i < output_buffer->depth; i++) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += output_buffer->depth; + } + + /* print key */ + if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ':'; + if (output_buffer->format) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += length; + + /* print value */ + if (!print_value(current_item, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + /* print comma if not last */ + length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 
1 : 0)); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + if (current_item->next) + { + *output_pointer++ = ','; + } + + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + *output_pointer = '\0'; + output_buffer->offset += length; + + current_item = current_item->next; + } + + output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2); + if (output_pointer == NULL) + { + return false; + } + if (output_buffer->format) + { + size_t i; + for (i = 0; i < (output_buffer->depth - 1); i++) + { + *output_pointer++ = '\t'; + } + } + *output_pointer++ = '}'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; } /* Get Array size/item / object item. */ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) { - cJSON *child = NULL; - size_t size = 0; + cJSON *child = NULL; + size_t size = 0; - if (array == NULL) - { - return 0; - } + if (array == NULL) + { + return 0; + } - child = array->child; + child = array->child; - while(child != NULL) - { - size++; - child = child->next; - } + while(child != NULL) + { + size++; + child = child->next; + } - /* FIXME: Can overflow here. Cannot be fixed without breaking the API */ + /* FIXME: Can overflow here. 
Cannot be fixed without breaking the API */ - return (int)size; + return (int)size; } static cJSON* get_array_item(const cJSON *array, size_t index) { - cJSON *current_child = NULL; + cJSON *current_child = NULL; - if (array == NULL) - { - return NULL; - } + if (array == NULL) + { + return NULL; + } - current_child = array->child; - while ((current_child != NULL) && (index > 0)) - { - index--; - current_child = current_child->next; - } + current_child = array->child; + while ((current_child != NULL) && (index > 0)) + { + index--; + current_child = current_child->next; + } - return current_child; + return current_child; } CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) { - if (index < 0) - { - return NULL; - } + if (index < 0) + { + return NULL; + } - return get_array_item(array, (size_t)index); + return get_array_item(array, (size_t)index); } static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) { - cJSON *current_element = NULL; + cJSON *current_element = NULL; - if ((object == NULL) || (name == NULL)) - { - return NULL; - } + if ((object == NULL) || (name == NULL)) + { + return NULL; + } - current_element = object->child; - if (case_sensitive) - { - while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0)) - { - current_element = current_element->next; - } - } - else - { - while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) - { - current_element = current_element->next; - } - } + current_element = object->child; + if (case_sensitive) + { + while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0)) + { + current_element = current_element->next; + } + } + else + { + while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned 
char*)(current_element->string)) != 0)) + { + current_element = current_element->next; + } + } - if ((current_element == NULL) || (current_element->string == NULL)) { - return NULL; - } + if ((current_element == NULL) || (current_element->string == NULL)) { + return NULL; + } - return current_element; + return current_element; } CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) { - return get_object_item(object, string, false); + return get_object_item(object, string, false); } CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) { - return get_object_item(object, string, true); + return get_object_item(object, string, true); } CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) { - return cJSON_GetObjectItem(object, string) ? 1 : 0; + return cJSON_GetObjectItem(object, string) ? 1 : 0; } /* Utility for array list handling. */ static void suffix_object(cJSON *prev, cJSON *item) { - prev->next = item; - item->prev = prev; + prev->next = item; + item->prev = prev; } /* Utility for handling references. 
*/ static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) { - cJSON *reference = NULL; - if (item == NULL) - { - return NULL; - } + cJSON *reference = NULL; + if (item == NULL) + { + return NULL; + } - reference = cJSON_New_Item(hooks); - if (reference == NULL) - { - return NULL; - } + reference = cJSON_New_Item(hooks); + if (reference == NULL) + { + return NULL; + } - memcpy(reference, item, sizeof(cJSON)); - reference->string = NULL; - reference->type |= cJSON_IsReference; - reference->next = reference->prev = NULL; - return reference; + memcpy(reference, item, sizeof(cJSON)); + reference->string = NULL; + reference->type |= cJSON_IsReference; + reference->next = reference->prev = NULL; + return reference; } static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) { - cJSON *child = NULL; - - if ((item == NULL) || (array == NULL) || (array == item)) - { - return false; - } - - child = array->child; - /* - * To find the last item in array quickly, we use prev in array - */ - if (child == NULL) - { - /* list is empty, start new one */ - array->child = item; - item->prev = item; - item->next = NULL; - } - else - { - /* append to the end */ - if (child->prev) - { - suffix_object(child->prev, item); - array->child->prev = item; - } - } - - return true; + cJSON *child = NULL; + + if ((item == NULL) || (array == NULL) || (array == item)) + { + return false; + } + + child = array->child; + /* + * To find the last item in array quickly, we use prev in array + */ + if (child == NULL) + { + /* list is empty, start new one */ + array->child = item; + item->prev = item; + item->next = NULL; + } + else + { + /* append to the end */ + if (child->prev) + { + suffix_object(child->prev, item); + array->child->prev = item; + } + } + + return true; } /* Add item to array/object. 
*/ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) { - return add_item_to_array(array, item); + return add_item_to_array(array, item); } #if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) - #pragma GCC diagnostic push + #pragma GCC diagnostic push #endif #ifdef __GNUC__ - #if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 1))) - #pragma GCC diagnostic ignored "-Wcast-qual" - #endif +#pragma GCC diagnostic ignored "-Wcast-qual" #endif /* helper function to cast away const */ static void* cast_away_const(const void* string) { - return (void*)string; + return (void*)string; } #if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) - #pragma GCC diagnostic pop + #pragma GCC diagnostic pop #endif static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key) { - char *new_key = NULL; - int new_type = cJSON_Invalid; + char *new_key = NULL; + int new_type = cJSON_Invalid; - if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item)) - { - return false; - } + if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item)) + { + return false; + } - if (constant_key) - { - new_key = (char*)cast_away_const(string); - new_type = item->type | cJSON_StringIsConst; - } - else - { - new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); - if (new_key == NULL) - { - return false; - } + if (constant_key) + { + new_key = (char*)cast_away_const(string); + new_type = item->type | cJSON_StringIsConst; + } + else + { + new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); + if (new_key == NULL) + { + return false; + } - new_type = item->type & ~cJSON_StringIsConst; - } + new_type = item->type & ~cJSON_StringIsConst; + } - if (!(item->type & cJSON_StringIsConst) && 
(item->string != NULL)) - { - hooks->deallocate(item->string); - } + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + hooks->deallocate(item->string); + } - item->string = new_key; - item->type = new_type; + item->string = new_key; + item->type = new_type; - return add_item_to_array(object, item); + return add_item_to_array(object, item); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) { - return add_item_to_object(object, string, item, &global_hooks, false); + return add_item_to_object(object, string, item, &global_hooks, false); } /* Add an item to an object with constant string as key */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) { - return add_item_to_object(object, string, item, &global_hooks, true); + return add_item_to_object(object, string, item, &global_hooks, true); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) { - if (array == NULL) - { - return false; - } + if (array == NULL) + { + return false; + } - return add_item_to_array(array, create_reference(item, &global_hooks)); + return add_item_to_array(array, create_reference(item, &global_hooks)); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) { - if ((object == NULL) || (string == NULL)) - { - return false; - } + if ((object == NULL) || (string == NULL)) + { + return false; + } - return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); + return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); } CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name) { - cJSON *null = cJSON_CreateNull(); - if (add_item_to_object(object, name, null, &global_hooks, false)) - { - return null; - } + cJSON *null = cJSON_CreateNull(); + if (add_item_to_object(object, name, null, 
&global_hooks, false)) + { + return null; + } - cJSON_Delete(null); - return NULL; + cJSON_Delete(null); + return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name) { - cJSON *true_item = cJSON_CreateTrue(); - if (add_item_to_object(object, name, true_item, &global_hooks, false)) - { - return true_item; - } + cJSON *true_item = cJSON_CreateTrue(); + if (add_item_to_object(object, name, true_item, &global_hooks, false)) + { + return true_item; + } - cJSON_Delete(true_item); - return NULL; + cJSON_Delete(true_item); + return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name) { - cJSON *false_item = cJSON_CreateFalse(); - if (add_item_to_object(object, name, false_item, &global_hooks, false)) - { - return false_item; - } + cJSON *false_item = cJSON_CreateFalse(); + if (add_item_to_object(object, name, false_item, &global_hooks, false)) + { + return false_item; + } - cJSON_Delete(false_item); - return NULL; + cJSON_Delete(false_item); + return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean) { - cJSON *bool_item = cJSON_CreateBool(boolean); - if (add_item_to_object(object, name, bool_item, &global_hooks, false)) - { - return bool_item; - } + cJSON *bool_item = cJSON_CreateBool(boolean); + if (add_item_to_object(object, name, bool_item, &global_hooks, false)) + { + return bool_item; + } - cJSON_Delete(bool_item); - return NULL; + cJSON_Delete(bool_item); + return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number) { - cJSON *number_item = cJSON_CreateNumber(number); - if (add_item_to_object(object, name, number_item, &global_hooks, false)) - { - return number_item; - } + cJSON *number_item = cJSON_CreateNumber(number); + if (add_item_to_object(object, name, number_item, &global_hooks, false)) + { + return number_item; + } - 
cJSON_Delete(number_item); - return NULL; + cJSON_Delete(number_item); + return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string) { - cJSON *string_item = cJSON_CreateString(string); - if (add_item_to_object(object, name, string_item, &global_hooks, false)) - { - return string_item; - } + cJSON *string_item = cJSON_CreateString(string); + if (add_item_to_object(object, name, string_item, &global_hooks, false)) + { + return string_item; + } - cJSON_Delete(string_item); - return NULL; + cJSON_Delete(string_item); + return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw) { - cJSON *raw_item = cJSON_CreateRaw(raw); - if (add_item_to_object(object, name, raw_item, &global_hooks, false)) - { - return raw_item; - } + cJSON *raw_item = cJSON_CreateRaw(raw); + if (add_item_to_object(object, name, raw_item, &global_hooks, false)) + { + return raw_item; + } - cJSON_Delete(raw_item); - return NULL; + cJSON_Delete(raw_item); + return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name) { - cJSON *object_item = cJSON_CreateObject(); - if (add_item_to_object(object, name, object_item, &global_hooks, false)) - { - return object_item; - } + cJSON *object_item = cJSON_CreateObject(); + if (add_item_to_object(object, name, object_item, &global_hooks, false)) + { + return object_item; + } - cJSON_Delete(object_item); - return NULL; + cJSON_Delete(object_item); + return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name) { - cJSON *array = cJSON_CreateArray(); - if (add_item_to_object(object, name, array, &global_hooks, false)) - { - return array; - } + cJSON *array = cJSON_CreateArray(); + if (add_item_to_object(object, name, array, &global_hooks, false)) + { + return array; + } - cJSON_Delete(array); - return NULL; + cJSON_Delete(array); + 
return NULL; } CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) { - if ((parent == NULL) || (item == NULL)) - { - return NULL; - } - - if (item != parent->child) - { - /* not the first element */ - item->prev->next = item->next; - } - if (item->next != NULL) - { - /* not the last element */ - item->next->prev = item->prev; - } - - if (item == parent->child) - { - /* first element */ - parent->child = item->next; - } - else if (item->next == NULL) - { - /* last element */ - parent->child->prev = item->prev; - } - - /* make sure the detached item doesn't point anywhere anymore */ - item->prev = NULL; - item->next = NULL; - - return item; + if ((parent == NULL) || (item == NULL)) + { + return NULL; + } + + if (item != parent->child) + { + /* not the first element */ + item->prev->next = item->next; + } + if (item->next != NULL) + { + /* not the last element */ + item->next->prev = item->prev; + } + + if (item == parent->child) + { + /* first element */ + parent->child = item->next; + } + else if (item->next == NULL) + { + /* last element */ + parent->child->prev = item->prev; + } + + /* make sure the detached item doesn't point anywhere anymore */ + item->prev = NULL; + item->next = NULL; + + return item; } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) { - if (which < 0) - { - return NULL; - } + if (which < 0) + { + return NULL; + } - return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); + return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); } CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) { - cJSON_Delete(cJSON_DetachItemFromArray(array, which)); + cJSON_Delete(cJSON_DetachItemFromArray(array, which)); } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) { - cJSON *to_detach = cJSON_GetObjectItem(object, string); + cJSON *to_detach = cJSON_GetObjectItem(object, string); - return 
cJSON_DetachItemViaPointer(object, to_detach); + return cJSON_DetachItemViaPointer(object, to_detach); } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) { - cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); + cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); - return cJSON_DetachItemViaPointer(object, to_detach); + return cJSON_DetachItemViaPointer(object, to_detach); } CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) { - cJSON_Delete(cJSON_DetachItemFromObject(object, string)); + cJSON_Delete(cJSON_DetachItemFromObject(object, string)); } CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) { - cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); + cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); } /* Replace array/object items with new ones. */ CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) { - cJSON *after_inserted = NULL; - - if (which < 0) - { - return false; - } - - after_inserted = get_array_item(array, (size_t)which); - if (after_inserted == NULL) - { - return add_item_to_array(array, newitem); - } - - newitem->next = after_inserted; - newitem->prev = after_inserted->prev; - after_inserted->prev = newitem; - if (after_inserted == array->child) - { - array->child = newitem; - } - else - { - newitem->prev->next = newitem; - } - return true; + cJSON *after_inserted = NULL; + + if (which < 0 || newitem == NULL) + { + return false; + } + + after_inserted = get_array_item(array, (size_t)which); + if (after_inserted == NULL) + { + return add_item_to_array(array, newitem); + } + + if (after_inserted != array->child && after_inserted->prev == NULL) { + /* return false if after_inserted is a corrupted array item */ + return false; + } + + newitem->next = after_inserted; + newitem->prev = after_inserted->prev; + 
after_inserted->prev = newitem; + if (after_inserted == array->child) + { + array->child = newitem; + } + else + { + newitem->prev->next = newitem; + } + return true; } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement) { - if ((parent == NULL) || (replacement == NULL) || (item == NULL)) - { - return false; - } - - if (replacement == item) - { - return true; - } - - replacement->next = item->next; - replacement->prev = item->prev; - - if (replacement->next != NULL) - { - replacement->next->prev = replacement; - } - if (parent->child == item) - { - if (parent->child->prev == parent->child) - { - replacement->prev = replacement; - } - parent->child = replacement; - } - else - { /* - * To find the last item in array quickly, we use prev in array. - * We can't modify the last item's next pointer where this item was the parent's child - */ - if (replacement->prev != NULL) - { - replacement->prev->next = replacement; - } - if (replacement->next == NULL) - { - parent->child->prev = replacement; - } - } - - item->next = NULL; - item->prev = NULL; - cJSON_Delete(item); - - return true; + if ((parent == NULL) || (parent->child == NULL) || (replacement == NULL) || (item == NULL)) + { + return false; + } + + if (replacement == item) + { + return true; + } + + replacement->next = item->next; + replacement->prev = item->prev; + + if (replacement->next != NULL) + { + replacement->next->prev = replacement; + } + if (parent->child == item) + { + if (parent->child->prev == parent->child) + { + replacement->prev = replacement; + } + parent->child = replacement; + } + else + { /* + * To find the last item in array quickly, we use prev in array. 
+ * We can't modify the last item's next pointer where this item was the parent's child + */ + if (replacement->prev != NULL) + { + replacement->prev->next = replacement; + } + if (replacement->next == NULL) + { + parent->child->prev = replacement; + } + } + + item->next = NULL; + item->prev = NULL; + cJSON_Delete(item); + + return true; } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) { - if (which < 0) - { - return false; - } + if (which < 0) + { + return false; + } - return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); + return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); } static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) { - if ((replacement == NULL) || (string == NULL)) - { - return false; - } + if ((replacement == NULL) || (string == NULL)) + { + return false; + } + + /* replace the name in the replacement */ + if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) + { + cJSON_free(replacement->string); + } + replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + if (replacement->string == NULL) + { + return false; + } - /* replace the name in the replacement */ - if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) - { - cJSON_free(replacement->string); - } - replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); - replacement->type &= ~cJSON_StringIsConst; + replacement->type &= ~cJSON_StringIsConst; - return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); + return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) { - return 
replace_item_in_object(object, string, newitem, false); + return replace_item_in_object(object, string, newitem, false); } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) { - return replace_item_in_object(object, string, newitem, true); + return replace_item_in_object(object, string, newitem, true); } /* Create basic types: */ CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) { - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_NULL; - } + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_NULL; + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) { - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_True; - } + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_True; + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) { - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_False; - } + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_False; + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) { - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = boolean ? cJSON_True : cJSON_False; - } + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = boolean ? 
cJSON_True : cJSON_False; + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) { - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_Number; - item->valuedouble = num; - - /* use saturation in case of overflow */ - if (num >= INT_MAX) - { - item->valueint = INT_MAX; - } - else if (num <= (double)INT_MIN) - { - item->valueint = INT_MIN; - } - else - { - item->valueint = (int)num; - } - } - - return item; + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Number; + item->valuedouble = num; + + /* use saturation in case of overflow */ + if (num >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (num <= (double)INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)num; + } + } + + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) { - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_String; - item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); - if(!item->valuestring) - { - cJSON_Delete(item); - return NULL; - } - } + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_String; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) { - cJSON *item = cJSON_New_Item(&global_hooks); - if (item != NULL) - { - item->type = cJSON_String | cJSON_IsReference; - item->valuestring = (char*)cast_away_const(string); - } + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) + { + item->type = cJSON_String | cJSON_IsReference; + item->valuestring = (char*)cast_away_const(string); + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) { - cJSON *item = 
cJSON_New_Item(&global_hooks); - if (item != NULL) { - item->type = cJSON_Object | cJSON_IsReference; - item->child = (cJSON*)cast_away_const(child); - } + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Object | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { - cJSON *item = cJSON_New_Item(&global_hooks); - if (item != NULL) { - item->type = cJSON_Array | cJSON_IsReference; - item->child = (cJSON*)cast_away_const(child); - } + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Array | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) { - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type = cJSON_Raw; - item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); - if(!item->valuestring) - { - cJSON_Delete(item); - return NULL; - } - } + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Raw; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) { - cJSON *item = cJSON_New_Item(&global_hooks); - if(item) - { - item->type=cJSON_Array; - } + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type=cJSON_Array; + } - return item; + return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) { - cJSON *item = cJSON_New_Item(&global_hooks); - if (item) - { - item->type = cJSON_Object; - } + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Object; + } - return item; + return item; } /* Create Arrays: */ CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int 
*numbers, int count) { - size_t i = 0; - cJSON *n = NULL; - cJSON *p = NULL; - cJSON *a = NULL; - - if ((count < 0) || (numbers == NULL)) - { - return NULL; - } - - a = cJSON_CreateArray(); - - for(i = 0; a && (i < (size_t)count); i++) - { - n = cJSON_CreateNumber(numbers[i]); - if (!n) - { - cJSON_Delete(a); - return NULL; - } - if(!i) - { - a->child = n; - } - else - { - suffix_object(p, n); - } - p = n; - } - - if (a && a->child) { - a->child->prev = n; - } - - return a; + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + if (a && a->child) { + a->child->prev = n; + } + + return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) { - size_t i = 0; - cJSON *n = NULL; - cJSON *p = NULL; - cJSON *a = NULL; - - if ((count < 0) || (numbers == NULL)) - { - return NULL; - } - - a = cJSON_CreateArray(); - - for(i = 0; a && (i < (size_t)count); i++) - { - n = cJSON_CreateNumber((double)numbers[i]); - if(!n) - { - cJSON_Delete(a); - return NULL; - } - if(!i) - { - a->child = n; - } - else - { - suffix_object(p, n); - } - p = n; - } - - if (a && a->child) { - a->child->prev = n; - } - - return a; + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber((double)numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + if (a && a->child) { + a->child->prev = n; + } + + return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const 
double *numbers, int count) { - size_t i = 0; - cJSON *n = NULL; - cJSON *p = NULL; - cJSON *a = NULL; - - if ((count < 0) || (numbers == NULL)) - { - return NULL; - } - - a = cJSON_CreateArray(); - - for(i = 0; a && (i < (size_t)count); i++) - { - n = cJSON_CreateNumber(numbers[i]); - if(!n) - { - cJSON_Delete(a); - return NULL; - } - if(!i) - { - a->child = n; - } - else - { - suffix_object(p, n); - } - p = n; - } - - if (a && a->child) { - a->child->prev = n; - } - - return a; + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + if (a && a->child) { + a->child->prev = n; + } + + return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count) { - size_t i = 0; - cJSON *n = NULL; - cJSON *p = NULL; - cJSON *a = NULL; - - if ((count < 0) || (strings == NULL)) - { - return NULL; - } - - a = cJSON_CreateArray(); - - for (i = 0; a && (i < (size_t)count); i++) - { - n = cJSON_CreateString(strings[i]); - if(!n) - { - cJSON_Delete(a); - return NULL; - } - if(!i) - { - a->child = n; - } - else - { - suffix_object(p,n); - } - p = n; - } - - if (a && a->child) { - a->child->prev = n; - } - - return a; + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (strings == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateString(strings[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p,n); + } + p = n; + } + + if (a && a->child) { + a->child->prev = n; + } + + return a; } /* Duplication */ CJSON_PUBLIC(cJSON *) 
cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) { - cJSON *newitem = NULL; - cJSON *child = NULL; - cJSON *next = NULL; - cJSON *newchild = NULL; - - /* Bail on bad ptr */ - if (!item) - { - goto fail; - } - /* Create new item */ - newitem = cJSON_New_Item(&global_hooks); - if (!newitem) - { - goto fail; - } - /* Copy over all vars */ - newitem->type = item->type & (~cJSON_IsReference); - newitem->valueint = item->valueint; - newitem->valuedouble = item->valuedouble; - if (item->valuestring) - { - newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); - if (!newitem->valuestring) - { - goto fail; - } - } - if (item->string) - { - newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); - if (!newitem->string) - { - goto fail; - } - } - /* If non-recursive, then we're done! */ - if (!recurse) - { - return newitem; - } - /* Walk the ->next chain for the child. */ - child = item->child; - while (child != NULL) - { - newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ - if (!newchild) - { - goto fail; - } - if (next != NULL) - { - /* If newitem->child already set, then crosswire ->prev and ->next and move on */ - next->next = newchild; - newchild->prev = next; - next = newchild; - } - else - { - /* Set newitem->child and move to it */ - newitem->child = newchild; - next = newchild; - } - child = child->next; - } - if (newitem && newitem->child) - { - newitem->child->prev = newchild; - } - - return newitem; + cJSON *newitem = NULL; + cJSON *child = NULL; + cJSON *next = NULL; + cJSON *newchild = NULL; + + /* Bail on bad ptr */ + if (!item) + { + goto fail; + } + /* Create new item */ + newitem = cJSON_New_Item(&global_hooks); + if (!newitem) + { + goto fail; + } + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference); + newitem->valueint = item->valueint; + newitem->valuedouble 
= item->valuedouble; + if (item->valuestring) + { + newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); + if (!newitem->valuestring) + { + goto fail; + } + } + if (item->string) + { + newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); + if (!newitem->string) + { + goto fail; + } + } + /* If non-recursive, then we're done! */ + if (!recurse) + { + return newitem; + } + /* Walk the ->next chain for the child. */ + child = item->child; + while (child != NULL) + { + newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ + if (!newchild) + { + goto fail; + } + if (next != NULL) + { + /* If newitem->child already set, then crosswire ->prev and ->next and move on */ + next->next = newchild; + newchild->prev = next; + next = newchild; + } + else + { + /* Set newitem->child and move to it */ + newitem->child = newchild; + next = newchild; + } + child = child->next; + } + if (newitem && newitem->child) + { + newitem->child->prev = newchild; + } + + return newitem; fail: - if (newitem != NULL) - { - cJSON_Delete(newitem); - } + if (newitem != NULL) + { + cJSON_Delete(newitem); + } - return NULL; + return NULL; } static void skip_oneline_comment(char **input) { - *input += static_strlen("//"); + *input += static_strlen("//"); - for (; (*input)[0] != '\0'; ++(*input)) - { - if ((*input)[0] == '\n') { - *input += static_strlen("\n"); - return; - } - } + for (; (*input)[0] != '\0'; ++(*input)) + { + if ((*input)[0] == '\n') { + *input += static_strlen("\n"); + return; + } + } } static void skip_multiline_comment(char **input) { - *input += static_strlen("/*"); + *input += static_strlen("/*"); - for (; (*input)[0] != '\0'; ++(*input)) - { - if (((*input)[0] == '*') && ((*input)[1] == '/')) - { - *input += static_strlen("*/"); - return; - } - } + for (; (*input)[0] != '\0'; ++(*input)) + { + if (((*input)[0] == 
'*') && ((*input)[1] == '/')) + { + *input += static_strlen("*/"); + return; + } + } } static void minify_string(char **input, char **output) { - (*output)[0] = (*input)[0]; - *input += static_strlen("\""); - *output += static_strlen("\""); + (*output)[0] = (*input)[0]; + *input += static_strlen("\""); + *output += static_strlen("\""); - for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { - (*output)[0] = (*input)[0]; + for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { + (*output)[0] = (*input)[0]; - if ((*input)[0] == '\"') { - (*output)[0] = '\"'; - *input += static_strlen("\""); - *output += static_strlen("\""); - return; - } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { - (*output)[1] = (*input)[1]; - *input += static_strlen("\""); - *output += static_strlen("\""); - } - } + if ((*input)[0] == '\"') { + (*output)[0] = '\"'; + *input += static_strlen("\""); + *output += static_strlen("\""); + return; + } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { + (*output)[1] = (*input)[1]; + *input += static_strlen("\""); + *output += static_strlen("\""); + } + } } CJSON_PUBLIC(void) cJSON_Minify(char *json) { - char *into = json; - - if (json == NULL) - { - return; - } - - while (json[0] != '\0') - { - switch (json[0]) - { - case ' ': - case '\t': - case '\r': - case '\n': - json++; - break; - - case '/': - if (json[1] == '/') - { - skip_oneline_comment(&json); - } - else if (json[1] == '*') - { - skip_multiline_comment(&json); - } else { - json++; - } - break; - - case '\"': - minify_string(&json, (char**)&into); - break; - - default: - into[0] = json[0]; - json++; - into++; - } - } - - /* and null-terminate. 
*/ - *into = '\0'; + char *into = json; + + if (json == NULL) + { + return; + } + + while (json[0] != '\0') + { + switch (json[0]) + { + case ' ': + case '\t': + case '\r': + case '\n': + json++; + break; + + case '/': + if (json[1] == '/') + { + skip_oneline_comment(&json); + } + else if (json[1] == '*') + { + skip_multiline_comment(&json); + } else { + json++; + } + break; + + case '\"': + minify_string(&json, (char**)&into); + break; + + default: + into[0] = json[0]; + json++; + into++; + } + } + + /* and null-terminate. */ + *into = '\0'; } CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xFF) == cJSON_Invalid; + return (item->type & 0xFF) == cJSON_Invalid; } CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xFF) == cJSON_False; + return (item->type & 0xFF) == cJSON_False; } CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xff) == cJSON_True; + return (item->type & 0xff) == cJSON_True; } CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & (cJSON_True | cJSON_False)) != 0; + return (item->type & (cJSON_True | cJSON_False)) != 0; } CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xFF) == cJSON_NULL; + return (item->type & 0xFF) == cJSON_NULL; } CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xFF) == 
cJSON_Number; + return (item->type & 0xFF) == cJSON_Number; } CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xFF) == cJSON_String; + return (item->type & 0xFF) == cJSON_String; } CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xFF) == cJSON_Array; + return (item->type & 0xFF) == cJSON_Array; } CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xFF) == cJSON_Object; + return (item->type & 0xFF) == cJSON_Object; } CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) { - if (item == NULL) - { - return false; - } + if (item == NULL) + { + return false; + } - return (item->type & 0xFF) == cJSON_Raw; + return (item->type & 0xFF) == cJSON_Raw; } CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) { - if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF))) - { - return false; - } - - /* check if type is valid */ - switch (a->type & 0xFF) - { - case cJSON_False: - case cJSON_True: - case cJSON_NULL: - case cJSON_Number: - case cJSON_String: - case cJSON_Raw: - case cJSON_Array: - case cJSON_Object: - break; - - default: - return false; - } - - /* identical objects are equal */ - if (a == b) - { - return true; - } - - switch (a->type & 0xFF) - { - /* in these cases and equal type is enough */ - case cJSON_False: - case cJSON_True: - case cJSON_NULL: - return true; - - case cJSON_Number: - if (compare_double(a->valuedouble, b->valuedouble)) - { - return true; - } - return false; - - case cJSON_String: - case cJSON_Raw: - if ((a->valuestring == NULL) || (b->valuestring == NULL)) - { - return false; 
- } - if (strcmp(a->valuestring, b->valuestring) == 0) - { - return true; - } - - return false; - - case cJSON_Array: - { - cJSON *a_element = a->child; - cJSON *b_element = b->child; - - for (; (a_element != NULL) && (b_element != NULL);) - { - if (!cJSON_Compare(a_element, b_element, case_sensitive)) - { - return false; - } - - a_element = a_element->next; - b_element = b_element->next; - } - - /* one of the arrays is longer than the other */ - if (a_element != b_element) { - return false; - } - - return true; - } - - case cJSON_Object: - { - cJSON *a_element = NULL; - cJSON *b_element = NULL; - cJSON_ArrayForEach(a_element, a) - { - /* TODO This has O(n^2) runtime, which is horrible! */ - b_element = get_object_item(b, a_element->string, case_sensitive); - if (b_element == NULL) - { - return false; - } - - if (!cJSON_Compare(a_element, b_element, case_sensitive)) - { - return false; - } - } - - /* doing this twice, once on a and b to prevent true comparison if a subset of b - * TODO: Do this the proper way, this is just a fix for now */ - cJSON_ArrayForEach(b_element, b) - { - a_element = get_object_item(a, b_element->string, case_sensitive); - if (a_element == NULL) - { - return false; - } - - if (!cJSON_Compare(b_element, a_element, case_sensitive)) - { - return false; - } - } - - return true; - } - - default: - return false; - } + if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF))) + { + return false; + } + + /* check if type is valid */ + switch (a->type & 0xFF) + { + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + case cJSON_Number: + case cJSON_String: + case cJSON_Raw: + case cJSON_Array: + case cJSON_Object: + break; + + default: + return false; + } + + /* identical objects are equal */ + if (a == b) + { + return true; + } + + switch (a->type & 0xFF) + { + /* in these cases and equal type is enough */ + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + return true; + + case cJSON_Number: + if 
(compare_double(a->valuedouble, b->valuedouble)) + { + return true; + } + return false; + + case cJSON_String: + case cJSON_Raw: + if ((a->valuestring == NULL) || (b->valuestring == NULL)) + { + return false; + } + if (strcmp(a->valuestring, b->valuestring) == 0) + { + return true; + } + + return false; + + case cJSON_Array: + { + cJSON *a_element = a->child; + cJSON *b_element = b->child; + + for (; (a_element != NULL) && (b_element != NULL);) + { + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + + a_element = a_element->next; + b_element = b_element->next; + } + + /* one of the arrays is longer than the other */ + if (a_element != b_element) { + return false; + } + + return true; + } + + case cJSON_Object: + { + cJSON *a_element = NULL; + cJSON *b_element = NULL; + cJSON_ArrayForEach(a_element, a) + { + /* TODO This has O(n^2) runtime, which is horrible! */ + b_element = get_object_item(b, a_element->string, case_sensitive); + if (b_element == NULL) + { + return false; + } + + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + } + + /* doing this twice, once on a and b to prevent true comparison if a subset of b + * TODO: Do this the proper way, this is just a fix for now */ + cJSON_ArrayForEach(b_element, b) + { + a_element = get_object_item(a, b_element->string, case_sensitive); + if (a_element == NULL) + { + return false; + } + + if (!cJSON_Compare(b_element, a_element, case_sensitive)) + { + return false; + } + } + + return true; + } + + default: + return false; + } } CJSON_PUBLIC(void *) cJSON_malloc(size_t size) { - return global_hooks.allocate(size); + return global_hooks.allocate(size); } CJSON_PUBLIC(void) cJSON_free(void *object) { - global_hooks.deallocate(object); + global_hooks.deallocate(object); } +/* Amazon edit */ +/* NOLINTEND */ diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h b/contrib/restricted/aws/aws-c-common/source/external/cJSON.h 
index 56959147708..47594475ee0 100644 --- a/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h +++ b/contrib/restricted/aws/aws-c-common/source/external/cJSON.h @@ -1,37 +1,34 @@ /* -Copyright (c) 2009-2017 Dave Gamble and cJSON contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. */ /* * This file has been modified from its original version by Amazon: - * (1) Address clang-tidy errors by renaming function parameters in a number of places - * to match their .c counterparts. - * (2) Misc tweaks to unchecked writes to make security static analysis happier - * (3) Remove cJSON_GetErrorPtr and global_error as they are not thread-safe. + * (1) Remove cJSON_GetErrorPtr and global_error as they are not thread-safe. + * (2) Add NOLINTBEGIN/NOLINTEND so clang-tidy ignores file. */ +/* NOLINTBEGIN */ -/* clang-format off */ - -#ifndef AWS_COMMON_EXTERNAL_CJSON_H // NOLINT -#define AWS_COMMON_EXTERNAL_CJSON_H // NOLINT +#ifndef cJSON__h +#define cJSON__h #ifdef __cplusplus extern "C" @@ -91,7 +88,7 @@ then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJ /* project version */ #define CJSON_VERSION_MAJOR 1 #define CJSON_VERSION_MINOR 7 -#define CJSON_VERSION_PATCH 15 +#define CJSON_VERSION_PATCH 17 #include <stddef.h> @@ -112,37 +109,37 @@ then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJ /* The cJSON structure: */ typedef struct cJSON { - /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ - struct cJSON *next; - struct cJSON *prev; - /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ - struct cJSON *child; - - /* The type of the item, as above. 
*/ - int type; - - /* The item's string, if type==cJSON_String and type == cJSON_Raw */ - char *valuestring; - /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ - int valueint; - /* The item's number, if type==cJSON_Number */ - double valuedouble; - - /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ - char *string; + /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ + struct cJSON *next; + struct cJSON *prev; + /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ + struct cJSON *child; + + /* The type of the item, as above. */ + int type; + + /* The item's string, if type==cJSON_String and type == cJSON_Raw */ + char *valuestring; + /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ + int valueint; + /* The item's number, if type==cJSON_Number */ + double valuedouble; + + /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ + char *string; } cJSON; typedef struct cJSON_Hooks { - /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */ - void *(CJSON_CDECL *malloc_fn)(size_t sz); // NOLINT - void (CJSON_CDECL *free_fn)(void *ptr); + /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */ + void *(CJSON_CDECL *malloc_fn)(size_t sz); + void (CJSON_CDECL *free_fn)(void *ptr); } cJSON_Hooks; typedef int cJSON_bool; /* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them. -* This is to prevent stack overflows. */ + * This is to prevent stack overflows. 
*/ #ifndef CJSON_NESTING_LIMIT #define CJSON_NESTING_LIMIT 1000 #endif @@ -182,6 +179,10 @@ CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); +#if 0 /* Amazon edit */ +/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */ +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); +#endif /* Amazon edit */ /* Check item type and return its value */ CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item); @@ -212,15 +213,15 @@ CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); /* Create a string where valuestring references a string so -* it will not be freed by cJSON_Delete */ + * it will not be freed by cJSON_Delete */ CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string); /* Create an object/array that only references it's elements so -* they will not be freed by cJSON_Delete */ + * they will not be freed by cJSON_Delete */ CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); /* These utilities create an Array of count items. 
-* The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/ + * The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/ CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); @@ -230,8 +231,8 @@ CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int co CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item); CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); /* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. -* WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before -* writing to `item->string` */ + * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before + * writing to `item->string` */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); /* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); @@ -256,19 +257,19 @@ CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,co /* Duplicate a cJSON item */ CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); /* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will -* need to be released. With recurse!=0, it will duplicate any children connected to the item. 
-* The item->next and ->prev pointers are always zero on return from Duplicate. */ + * need to be released. With recurse!=0, it will duplicate any children connected to the item. + * The item->next and ->prev pointers are always zero on return from Duplicate. */ /* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. -* case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ -CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); // NOLINT + * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); /* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from strings. -* The input pointer json cannot point to a read-only address area, such as a string constant, -* but should point to a readable and writable address area. */ + * The input pointer json cannot point to a read-only address area, such as a string constant, + * but should point to a readable and writable address area. */ CJSON_PUBLIC(void) cJSON_Minify(char *json); /* Helper functions for creating and adding items to an object at the same time. -* They return the added item or NULL on failure. */ + * They return the added item or NULL on failure. 
*/ CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name); @@ -280,15 +281,22 @@ CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name); /* When assigning an integer value, it needs to be propagated to valuedouble too. */ -#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) //NOLINT +#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) /* helper for the cJSON_SetNumberValue macro */ CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); -#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) //NOLINT +#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) /* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */ CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring); +/* If the object is not a boolean type this does nothing and returns cJSON_Invalid else it returns the new type*/ +#define cJSON_SetBoolValue(object, boolValue) ( \ + (object != NULL && ((object)->type & (cJSON_False|cJSON_True))) ? \ + (object)->type=((object)->type &(~(cJSON_False|cJSON_True)))|((boolValue)?cJSON_True:cJSON_False) : \ + cJSON_Invalid\ +) + /* Macro for iterating over an array or object */ -#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? 
(array)->child : NULL; element != NULL; element = element->next) //NOLINT +#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) /* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ CJSON_PUBLIC(void *) cJSON_malloc(size_t size); @@ -297,5 +305,6 @@ CJSON_PUBLIC(void) cJSON_free(void *object); #ifdef __cplusplus } #endif - +/* Amazon edit */ +/* NOLINTEND */ #endif diff --git a/contrib/restricted/aws/aws-c-common/source/file.c b/contrib/restricted/aws/aws-c-common/source/file.c index 01eb0a6afde..504e547f509 100644 --- a/contrib/restricted/aws/aws-c-common/source/file.c +++ b/contrib/restricted/aws/aws-c-common/source/file.c @@ -11,6 +11,16 @@ #include <errno.h> +/* For "special files", the OS often lies about size. + * For example, on Amazon Linux 2: + * /proc/cpuinfo: size is 0, but contents are several KB of data. + * /sys/devices/virtual/dmi/id/product_name: size is 4096, but contents are "c5.2xlarge" + * + * Therefore, we may need to grow the buffer as we read until EOF. + * This is the min/max step size for growth. */ +#define MIN_BUFFER_GROWTH_READING_FILES 32 +#define MAX_BUFFER_GROWTH_READING_FILES 4096 + FILE *aws_fopen(const char *file_path, const char *mode) { if (!file_path || strlen(file_path) == 0) { AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. 
path is empty"); @@ -34,55 +44,117 @@ FILE *aws_fopen(const char *file_path, const char *mode) { return file; } -int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) { +/* Helper function used by aws_byte_buf_init_from_file() and aws_byte_buf_init_from_file_with_size_hint() */ +static int s_byte_buf_init_from_file_impl( + struct aws_byte_buf *out_buf, + struct aws_allocator *alloc, + const char *filename, + bool use_file_size_as_hint, + size_t size_hint) { AWS_ZERO_STRUCT(*out_buf); FILE *fp = aws_fopen(filename, "rb"); + if (fp == NULL) { + goto error; + } - if (fp) { - if (fseek(fp, 0L, SEEK_END)) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno_value); - fclose(fp); - return aws_translate_and_raise_io_error(errno_value); + if (use_file_size_as_hint) { + int64_t len64 = 0; + if (aws_file_get_length(fp, &len64)) { + AWS_LOGF_ERROR( + AWS_LS_COMMON_IO, + "static: Failed to get file length. file:'%s' error:%s", + filename, + aws_error_name(aws_last_error())); + goto error; + } + + if (len64 >= SIZE_MAX) { + aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); + AWS_LOGF_ERROR( + AWS_LS_COMMON_IO, + "static: File too large to read into memory. file:'%s' error:%s", + filename, + aws_error_name(aws_last_error())); + goto error; } - size_t allocation_size = (size_t)ftell(fp) + 1; - /* Tell the user that we allocate here and if success they're responsible for the free. 
*/ - if (aws_byte_buf_init(out_buf, alloc, allocation_size)) { - fclose(fp); - return AWS_OP_ERR; + /* Leave space for null terminator at end of buffer */ + size_hint = (size_t)len64 + 1; + } + + aws_byte_buf_init(out_buf, alloc, size_hint); + + /* Read in a loop until we hit EOF */ + while (true) { + /* Expand buffer if necessary (at a reasonable rate) */ + if (out_buf->len == out_buf->capacity) { + size_t additional_capacity = out_buf->capacity; + additional_capacity = aws_max_size(MIN_BUFFER_GROWTH_READING_FILES, additional_capacity); + additional_capacity = aws_min_size(MAX_BUFFER_GROWTH_READING_FILES, additional_capacity); + if (aws_byte_buf_reserve_relative(out_buf, additional_capacity)) { + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to grow buffer for file:'%s'", filename); + goto error; + } } - /* Ensure compatibility with null-terminated APIs, but don't consider - * the null terminator part of the length of the payload */ - out_buf->len = out_buf->capacity - 1; - out_buf->buffer[out_buf->len] = 0; - - if (fseek(fp, 0L, SEEK_SET)) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno_value); - aws_byte_buf_clean_up(out_buf); - fclose(fp); - return aws_translate_and_raise_io_error(errno_value); + size_t space_available = out_buf->capacity - out_buf->len; + size_t bytes_read = fread(out_buf->buffer + out_buf->len, 1, space_available, fp); + out_buf->len += bytes_read; + + /* If EOF, we're done! 
*/ + if (feof(fp)) { + break; } - size_t read = fread(out_buf->buffer, 1, out_buf->len, fp); - int errno_cpy = errno; /* Always cache errno before potential side-effect */ - fclose(fp); - if (read < out_buf->len) { - AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to read file %s with errno %d", filename, errno_cpy); - aws_secure_zero(out_buf->buffer, out_buf->len); - aws_byte_buf_clean_up(out_buf); - return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + /* If no EOF but we read 0 bytes, there's been an error or at least we need + * to treat it like one because we can't just infinitely loop. */ + if (bytes_read == 0) { + int errno_value = ferror(fp) ? errno : 0; /* Always cache errno before potential side-effect */ + aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_FILE_READ_FAILURE); + AWS_LOGF_ERROR( + AWS_LS_COMMON_IO, + "static: Failed reading file:'%s' errno:%d aws-error:%s", + filename, + errno_value, + aws_error_name(aws_last_error())); + goto error; } + } - return AWS_OP_SUCCESS; + /* A null terminator is appended, but is not included as part of the length field. 
*/ + if (out_buf->len == out_buf->capacity) { + if (aws_byte_buf_reserve_relative(out_buf, 1)) { + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to grow buffer for file:'%s'", filename); + goto error; + } } + out_buf->buffer[out_buf->len] = 0; + fclose(fp); + return AWS_OP_SUCCESS; + +error: + if (fp) { + fclose(fp); + } + aws_byte_buf_clean_up_secure(out_buf); return AWS_OP_ERR; } +int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) { + return s_byte_buf_init_from_file_impl(out_buf, alloc, filename, true /*use_file_size_as_hint*/, 0 /*size_hint*/); +} + +int aws_byte_buf_init_from_file_with_size_hint( + struct aws_byte_buf *out_buf, + struct aws_allocator *alloc, + const char *filename, + size_t size_hint) { + + return s_byte_buf_init_from_file_impl(out_buf, alloc, filename, false /*use_file_size_as_hint*/, size_hint); +} + bool aws_is_any_directory_separator(char value) { return value == '\\' || value == '/'; } diff --git a/contrib/restricted/aws/aws-c-common/source/hash_table.c b/contrib/restricted/aws/aws-c-common/source/hash_table.c index 88926e48f97..bd5d04a3fc9 100644 --- a/contrib/restricted/aws/aws-c-common/source/hash_table.c +++ b/contrib/restricted/aws/aws-c-common/source/hash_table.c @@ -973,7 +973,7 @@ uint64_t aws_hash_byte_cursor_ptr(const void *item) { /* first digits of pi in hex */ uint32_t b = 0x3243F6A8, c = 0x885A308D; hashlittle2(cur->ptr, cur->len, &c, &b); - AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_byte_cursor_is_valid(cur)); + AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_byte_cursor_is_valid(cur)); /* NOLINT */ } uint64_t aws_hash_ptr(const void *item) { @@ -1104,3 +1104,11 @@ int hash_table_state_required_bytes(size_t size, size_t *required_bytes) { return AWS_OP_SUCCESS; } + +uint64_t aws_hash_uint64_t_by_identity(const void *item) { + return *(uint64_t *)item; +} + +bool aws_hash_compare_uint64_t_eq(const void *a, const void *b) { + return 
*(uint64_t *)a == *(uint64_t *)b; +} diff --git a/contrib/restricted/aws/aws-c-common/source/host_utils.c b/contrib/restricted/aws/aws-c-common/source/host_utils.c new file mode 100644 index 00000000000..6cd26ba88e2 --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/source/host_utils.c @@ -0,0 +1,127 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include <aws/common/host_utils.h> +#include <aws/common/string.h> +#include <inttypes.h> + +#ifdef _MSC_VER /* Disable sscanf warnings on windows. */ +# pragma warning(disable : 4204) +# pragma warning(disable : 4706) +# pragma warning(disable : 4996) +#endif + +/* 4 octets of 3 chars max + 3 separators + null terminator */ +#define AWS_IPV4_STR_LEN 16 +#define IP_CHAR_FMT "%03" SCNu16 + +static bool s_is_ipv6_char(uint8_t value) { + return aws_isxdigit(value) || value == ':'; +} + +static bool s_starts_with(struct aws_byte_cursor cur, uint8_t ch) { + return cur.len > 0 && cur.ptr[0] == ch; +} + +static bool s_ends_with(struct aws_byte_cursor cur, uint8_t ch) { + return cur.len > 0 && cur.ptr[cur.len - 1] == ch; +} + +bool aws_host_utils_is_ipv4(struct aws_byte_cursor host) { + if (host.len > AWS_IPV4_STR_LEN - 1) { + return false; + } + + char copy[AWS_IPV4_STR_LEN] = {0}; + memcpy(copy, host.ptr, host.len); + + uint16_t octet[4] = {0}; + char remainder[2] = {0}; + if (4 != sscanf( + copy, + IP_CHAR_FMT "." IP_CHAR_FMT "." IP_CHAR_FMT "." 
IP_CHAR_FMT "%1s", + &octet[0], + &octet[1], + &octet[2], + &octet[3], + remainder)) { + return false; + } + + for (size_t i = 0; i < 4; ++i) { + if (octet[i] > 255) { + return false; + } + } + + return true; +} + +/* actual encoding is %25, but % is omitted for simplicity, since split removes it */ +static struct aws_byte_cursor s_percent_uri_enc = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("25"); +/* + * IPv6 format: + * 8 groups of 4 hex chars separated by colons (:) + * leading 0s in each group can be skipped + * 2 or more consecutive zero groups can be replaced by double colon (::), + * but only once. + * ipv6 literal can be scoped by to zone by appending % followed by zone name + * ( does not look like there is length reqs on zone name length. this + * implementation enforces that its > 1 ) + * ipv6 can be embedded in url, in which case it must be wrapped inside [] + * and % be uri encoded as %25. + * Implementation is fairly trivial and just iterates through the string + * keeping track of the spec above. 
+ */ +bool aws_host_utils_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded) { + if (host.len == 0) { + return false; + } + + if (is_uri_encoded) { + if (!s_starts_with(host, '[') || !s_ends_with(host, ']')) { + return false; + } + aws_byte_cursor_advance(&host, 1); + --host.len; + } + + struct aws_byte_cursor substr = {0}; + /* first split is required ipv6 part */ + bool is_split = aws_byte_cursor_next_split(&host, '%', &substr); + AWS_ASSERT(is_split); /* function is guaranteed to return at least one split */ + + if (!is_split || substr.len == 0 || s_ends_with(substr, ':') || + !aws_byte_cursor_satisfies_pred(&substr, s_is_ipv6_char)) { + return false; + } + + uint8_t group_count = 0; + bool has_double_colon = false; + struct aws_byte_cursor group = {0}; + while (aws_byte_cursor_next_split(&substr, ':', &group)) { + ++group_count; + + if (group_count > 8 || /* too many groups */ + group.len > 4 || /* too many chars in group */ + (has_double_colon && group.len == 0 && group_count > 2)) { /* only one double colon allowed */ + return false; + } + + has_double_colon = has_double_colon || group.len == 0; + } + + /* second split is optional zone part */ + if (aws_byte_cursor_next_split(&host, '%', &substr)) { + if ((is_uri_encoded && + (substr.len < 3 || + !aws_byte_cursor_starts_with(&substr, &s_percent_uri_enc))) || /* encoding for % + 1 extra char */ + (!is_uri_encoded && substr.len == 0) || /* at least 1 char */ + !aws_byte_cursor_satisfies_pred(&substr, aws_isalnum)) { + return false; + } + } + + return has_double_colon ? 
group_count < 7 : group_count == 8; +} diff --git a/contrib/restricted/aws/aws-c-common/source/json.c b/contrib/restricted/aws/aws-c-common/source/json.c index 0131ea116b5..326354d628e 100644 --- a/contrib/restricted/aws/aws-c-common/source/json.c +++ b/contrib/restricted/aws/aws-c-common/source/json.c @@ -9,7 +9,7 @@ #include <aws/common/json.h> #include <aws/common/private/json_impl.h> -#include <aws/common/external/cJSON.h> +#include "external/cJSON.h" static struct aws_allocator *s_aws_json_module_allocator = NULL; static bool s_aws_json_module_initialized = false; @@ -397,7 +397,7 @@ int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct a char *tmp = cJSON_PrintUnformatted(cjson); if (tmp == NULL) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } // Append the text to the byte buffer @@ -415,7 +415,7 @@ int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value char *tmp = cJSON_Print(cjson); if (tmp == NULL) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } // Append the text to the byte buffer diff --git a/contrib/restricted/aws/aws-c-common/source/linux/system_info.c b/contrib/restricted/aws/aws-c-common/source/linux/system_info.c new file mode 100644 index 00000000000..2d9c5a120db --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/source/linux/system_info.c @@ -0,0 +1,24 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/common/file.h> +#include <aws/common/private/system_info_priv.h> + +int aws_system_environment_load_platform_impl(struct aws_system_environment *env) { + /* provide size_hint when reading "special files", since some platforms mis-report these files' size as 4KB */ + aws_byte_buf_init_from_file_with_size_hint( + &env->virtualization_vendor, env->allocator, "/sys/devices/virtual/dmi/id/sys_vendor", 32 /*size_hint*/); + + /* whether this one works depends on if this is a sysfs filesystem. If it fails, it will just be empty + * and these APIs are a best effort at the moment. We can add fallbacks as the loaders get more complicated. */ + aws_byte_buf_init_from_file_with_size_hint( + &env->product_name, env->allocator, "/sys/devices/virtual/dmi/id/product_name", 32 /*size_hint*/); + + return AWS_OP_SUCCESS; +} + +void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env) { + aws_byte_buf_clean_up(&env->virtualization_vendor); + aws_byte_buf_clean_up(&env->product_name); +} diff --git a/contrib/restricted/aws/aws-c-common/source/log_formatter.c b/contrib/restricted/aws/aws-c-common/source/log_formatter.c index d4be0c0c6d2..b6e74e40775 100644 --- a/contrib/restricted/aws/aws-c-common/source/log_formatter.c +++ b/contrib/restricted/aws/aws-c-common/source/log_formatter.c @@ -20,15 +20,17 @@ # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif -/* (max) strlen of "[<LogLevel>]" */ -#define LOG_LEVEL_PREFIX_PADDING 7 +enum { + /* (max) strlen of "[<LogLevel>]" */ + LOG_LEVEL_PREFIX_PADDING = 7, -/* (max) strlen of "[<ThreadId>]" */ -#define THREAD_ID_PREFIX_PADDING 22 + /* (max) strlen of "[<ThreadId>]" */ + THREAD_ID_PREFIX_PADDING = 22, -/* strlen of (user-content separator) " - " + "\n" + spaces between prefix fields + brackets around timestamp + 1 + - subject_name padding */ -#define MISC_PADDING 15 + /* strlen of (user-content separator) " - " + "\n" + spaces between prefix fields + brackets 
around timestamp + 1 + + subject_name padding */ + MISC_PADDING = 15, +}; #define MAX_LOG_LINE_PREFIX_SIZE \ (LOG_LEVEL_PREFIX_PADDING + THREAD_ID_PREFIX_PADDING + MISC_PADDING + AWS_DATE_TIME_STR_MAX_LEN) @@ -203,7 +205,7 @@ static int s_default_aws_log_formatter_format( struct aws_default_log_formatter_impl *impl = formatter->impl; if (formatted_output == NULL) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* diff --git a/contrib/restricted/aws/aws-c-common/source/log_writer.c b/contrib/restricted/aws/aws-c-common/source/log_writer.c index 5f5bc4f6fd5..9f50906274e 100644 --- a/contrib/restricted/aws/aws-c-common/source/log_writer.c +++ b/contrib/restricted/aws/aws-c-common/source/log_writer.c @@ -27,8 +27,8 @@ static int s_aws_file_writer_write(struct aws_log_writer *writer, const struct a size_t length = output->len; if (fwrite(output->bytes, 1, length, impl->log_file) < length) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - return aws_translate_and_raise_io_error(errno_value); + int errno_value = ferror(impl->log_file) ? 
errno : 0; /* Always cache errno before potential side-effect */ + return aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_FILE_WRITE_FAILURE); } return AWS_OP_SUCCESS; diff --git a/contrib/restricted/aws/aws-c-common/source/logging.c b/contrib/restricted/aws/aws-c-common/source/logging.c index fdc29576d82..46f5e7dbe51 100644 --- a/contrib/restricted/aws/aws-c-common/source/logging.c +++ b/contrib/restricted/aws/aws-c-common/source/logging.c @@ -337,11 +337,11 @@ int aws_thread_id_t_to_string(aws_thread_id_t thread_id, char *buffer, size_t bu unsigned char c = bytes[i - 1]; int written = snprintf(buffer + current_index, bufsz - current_index, "%02x", c); if (written < 0) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index += written; if (bufsz <= current_index) { - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } return AWS_OP_SUCCESS; @@ -454,7 +454,7 @@ static enum aws_log_level s_noalloc_stderr_logger_get_log_level(struct aws_logge return (enum aws_log_level)aws_atomic_load_int(&impl->level); } -#define MAXIMUM_NO_ALLOC_LOG_LINE_SIZE 8192 +enum { MAXIMUM_NO_ALLOC_LOG_LINE_SIZE = 8192 }; static int s_noalloc_stderr_logger_log( struct aws_logger *logger, @@ -502,8 +502,8 @@ static int s_noalloc_stderr_logger_log( int write_result = AWS_OP_SUCCESS; if (fwrite(format_buffer, 1, format_data.amount_written, impl->file) < format_data.amount_written) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - aws_translate_and_raise_io_error(errno_value); + int errno_value = ferror(impl->file) ? 
errno : 0; /* Always cache errno before potential side-effect */ + aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_FILE_WRITE_FAILURE); write_result = AWS_OP_ERR; } diff --git a/contrib/restricted/aws/aws-c-common/source/memtrace.c b/contrib/restricted/aws/aws-c-common/source/memtrace.c index 651fd93612a..9c5bff60cc0 100644 --- a/contrib/restricted/aws/aws-c-common/source/memtrace.c +++ b/contrib/restricted/aws/aws-c-common/source/memtrace.c @@ -57,7 +57,7 @@ struct alloc_tracer { }; /* number of frames to skip in call stacks (s_alloc_tracer_track, and the vtable function) */ -#define FRAMES_TO_SKIP 2 +enum { FRAMES_TO_SKIP = 2 }; static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size); static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr); @@ -438,11 +438,18 @@ static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) { static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) { struct alloc_tracer *tracer = allocator->impl; void *new_ptr = old_ptr; - if (aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size)) { - return NULL; - } + /* + * Careful with the ordering of state clean up here. + * Tracer keeps a hash table (alloc ptr as key) of meta info about each allocation. 
+ * To avoid race conditions during realloc state update needs to be done in + * following order to avoid race conditions: + * - remove meta info (other threads cant reuse that key, cause ptr is still valid ) + * - realloc (cant fail, ptr might remain the same) + * - add meta info for reallocated mem + */ s_alloc_tracer_untrack(tracer, old_ptr); + aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size); s_alloc_tracer_track(tracer, new_ptr, new_size); return new_ptr; diff --git a/contrib/restricted/aws/aws-c-common/source/posix/clock.c b/contrib/restricted/aws/aws-c-common/source/posix/clock.c index 90e213ea7c8..b2c3bc3f0e7 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/clock.c +++ b/contrib/restricted/aws/aws-c-common/source/posix/clock.c @@ -43,11 +43,11 @@ static int s_legacy_get_time(uint64_t *timestamp) { # if MAC_OS_X_VERSION_MAX_ALLOWED >= 101200 static aws_thread_once s_thread_once_flag = AWS_THREAD_ONCE_STATIC_INIT; -static int (*s_gettime_fn)(clockid_t __clock_id, struct timespec *__tp) = NULL; +static int (*s_gettime_fn)(clockid_t clock_id, struct timespec *tp) = NULL; static void s_do_osx_loads(void *user_data) { (void)user_data; - s_gettime_fn = (int (*)(clockid_t __clock_id, struct timespec * __tp)) dlsym(RTLD_DEFAULT, "clock_gettime"); + s_gettime_fn = (int (*)(clockid_t clock_id, struct timespec * tp)) dlsym(RTLD_DEFAULT, "clock_gettime"); } int aws_high_res_clock_get_ticks(uint64_t *timestamp) { diff --git a/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c b/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c index ca321c6bfad..64edb45b85a 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c +++ b/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c @@ -93,12 +93,10 @@ int aws_condition_variable_wait_for( return AWS_OP_ERR; } - time_to_wait += current_sys_time; - struct timespec ts; uint64_t remainder = 0; - ts.tv_sec = - 
(time_t)aws_timestamp_convert((uint64_t)time_to_wait, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder); + ts.tv_sec = (time_t)aws_timestamp_convert( + (uint64_t)(time_to_wait + current_sys_time), AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder); ts.tv_nsec = (long)remainder; int err_code = pthread_cond_timedwait(&condition_variable->condition_handle, &mutex->mutex_handle, &ts); diff --git a/contrib/restricted/aws/aws-c-common/source/posix/cross_process_lock.c b/contrib/restricted/aws/aws-c-common/source/posix/cross_process_lock.c new file mode 100644 index 00000000000..1ef5d2b5fe4 --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/source/posix/cross_process_lock.c @@ -0,0 +1,141 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include <aws/common/cross_process_lock.h> + +#include <aws/common/byte_buf.h> +#include <errno.h> +#include <fcntl.h> +#include <sys/file.h> +#include <sys/stat.h> +#include <unistd.h> + +#include <aws/common/error.h> +#include <aws/common/file.h> +#include <aws/common/logging.h> + +struct aws_cross_process_lock { + struct aws_allocator *allocator; + int locked_fd; +}; + +struct aws_cross_process_lock *aws_cross_process_lock_try_acquire( + struct aws_allocator *allocator, + struct aws_byte_cursor instance_nonce) { + + /* validate we don't have a directory slash. */ + struct aws_byte_cursor to_find = aws_byte_cursor_from_c_str("/"); + struct aws_byte_cursor found; + AWS_ZERO_STRUCT(found); + if (aws_byte_cursor_find_exact(&instance_nonce, &to_find, &found) != AWS_OP_ERR && + aws_last_error() != AWS_ERROR_STRING_MATCH_NOT_FOUND) { + AWS_LOGF_ERROR( + AWS_LS_COMMON_GENERAL, + "static: Lock " PRInSTR "creation has illegal character /", + AWS_BYTE_CURSOR_PRI(instance_nonce)); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + + /* + * The unix standard says /tmp has to be there and be writable. 
However, while it may be tempting to just use the + * /tmp/ directory, it often has the sticky bit set which would prevent a subprocess from being able to call open + * with create on the file. The solution is simple, just write it to a subdirectory inside + * /tmp and override umask via. chmod of 0777. + */ + struct aws_byte_cursor path_prefix = aws_byte_cursor_from_c_str("/tmp/aws_crt_cross_process_lock/"); + struct aws_string *path_to_create = aws_string_new_from_cursor(allocator, &path_prefix); + + /* It's probably there already and we don't care if it is. */ + if (!aws_directory_exists(path_to_create)) { + /* if this call fails just let it fail on open below. */ + aws_directory_create(path_to_create); + /* bypass umask by setting the perms we actually requested */ + chmod(aws_string_c_str(path_to_create), S_IRWXU | S_IRWXG | S_IRWXO); + } + aws_string_destroy(path_to_create); + + struct aws_byte_cursor path_suffix = aws_byte_cursor_from_c_str(".lock"); + + struct aws_byte_buf nonce_buf; + aws_byte_buf_init_copy_from_cursor(&nonce_buf, allocator, path_prefix); + aws_byte_buf_append_dynamic(&nonce_buf, &instance_nonce); + aws_byte_buf_append_dynamic(&nonce_buf, &path_suffix); + aws_byte_buf_append_null_terminator(&nonce_buf); + + struct aws_cross_process_lock *instance_lock = NULL; + + errno = 0; + int fd = open((const char *)nonce_buf.buffer, O_CREAT | O_RDWR, 0666); + if (fd < 0) { + AWS_LOGF_DEBUG( + AWS_LS_COMMON_GENERAL, + "static: Lock file %s failed to open with errno %d", + (const char *)nonce_buf.buffer, + errno); + + aws_translate_and_raise_io_error_or(errno, AWS_ERROR_MUTEX_FAILED); + + if (aws_last_error() == AWS_ERROR_NO_PERMISSION) { + AWS_LOGF_DEBUG( + AWS_LS_COMMON_GENERAL, + "static: Lock file %s couldn't be opened due to file ownership permissions. 
Attempting to open as read " + "only", + (const char *)nonce_buf.buffer); + + errno = 0; + fd = open((const char *)nonce_buf.buffer, O_RDONLY); + + if (fd < 0) { + AWS_LOGF_ERROR( + AWS_LS_COMMON_GENERAL, + "static: Lock file %s failed to open with read-only permissions with errno %d", + (const char *)nonce_buf.buffer, + errno); + aws_translate_and_raise_io_error_or(errno, AWS_ERROR_MUTEX_FAILED); + goto cleanup; + } + } else { + AWS_LOGF_ERROR( + AWS_LS_COMMON_GENERAL, + "static: Lock file %s failed to open. The lock cannot be acquired.", + (const char *)nonce_buf.buffer); + goto cleanup; + } + } + + if (flock(fd, LOCK_EX | LOCK_NB) == -1) { + AWS_LOGF_TRACE( + AWS_LS_COMMON_GENERAL, + "static: Lock file %s already acquired by another instance", + (const char *)nonce_buf.buffer); + close(fd); + aws_raise_error(AWS_ERROR_MUTEX_CALLER_NOT_OWNER); + goto cleanup; + } + + instance_lock = aws_mem_calloc(allocator, 1, sizeof(struct aws_cross_process_lock)); + instance_lock->locked_fd = fd; + instance_lock->allocator = allocator; + + AWS_LOGF_TRACE( + AWS_LS_COMMON_GENERAL, + "static: Lock file %s acquired by this instance with fd %d", + (const char *)nonce_buf.buffer, + fd); + +cleanup: + aws_byte_buf_clean_up(&nonce_buf); + + return instance_lock; +} + +void aws_cross_process_lock_release(struct aws_cross_process_lock *instance_lock) { + if (instance_lock) { + flock(instance_lock->locked_fd, LOCK_UN); + close(instance_lock->locked_fd); + AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "static: Lock file released for fd %d", instance_lock->locked_fd); + aws_mem_release(instance_lock->allocator, instance_lock); + } +} diff --git a/contrib/restricted/aws/aws-c-common/source/posix/device_random.c b/contrib/restricted/aws/aws-c-common/source/posix/device_random.c index 23ab1cd7b44..53f063ddf7b 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/device_random.c +++ b/contrib/restricted/aws/aws-c-common/source/posix/device_random.c @@ -51,7 +51,8 @@ int 
aws_device_random_buffer_append(struct aws_byte_buf *output, size_t n) { /* read() can fail if N is too large (e.g. x64 macos fails if N > INT32_MAX), * so work in reasonably sized chunks. */ while (n > 0) { - size_t capped_n = aws_min_size(n, 1024 * 1024 * 1024 * 1 /* 1GiB */); + size_t capped_n = aws_min_size( + n, 1024 * 1024 * 1024 * 1 /* 1GiB */); /* NOLINT(bugprone-implicit-widening-of-multiplication-result) */ ssize_t amount_read = read(s_rand_fd, output->buffer + output->len, capped_n); diff --git a/contrib/restricted/aws/aws-c-common/source/posix/file.c b/contrib/restricted/aws/aws-c-common/source/posix/file.c index f8dbbd7e125..86811295523 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/file.c +++ b/contrib/restricted/aws/aws-c-common/source/posix/file.c @@ -18,7 +18,7 @@ FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string FILE *f = fopen(aws_string_c_str(file_path), aws_string_c_str(mode)); if (!f) { int errno_cpy = errno; /* Always cache errno before potential side-effect */ - aws_translate_and_raise_io_error(errno_cpy); + aws_translate_and_raise_io_error_or(errno_cpy, AWS_ERROR_FILE_OPEN_FAILURE); AWS_LOGF_ERROR( AWS_LS_COMMON_IO, "static: Failed to open file. 
path:'%s' mode:'%s' errno:%d aws-error:%d(%s)", @@ -285,7 +285,7 @@ int aws_fseek(FILE *file, int64_t offset, int whence) { int errno_value = errno; /* Always cache errno before potential side-effect */ if (result != 0) { - return aws_translate_and_raise_io_error(errno_value); + return aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_STREAM_UNSEEKABLE); } return AWS_OP_SUCCESS; diff --git a/contrib/restricted/aws/aws-c-common/source/posix/mutex.c b/contrib/restricted/aws/aws-c-common/source/posix/mutex.c index 2cbf2db66ce..b5c865700a5 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/mutex.c +++ b/contrib/restricted/aws/aws-c-common/source/posix/mutex.c @@ -23,9 +23,11 @@ int aws_mutex_init(struct aws_mutex *mutex) { int return_code = AWS_OP_SUCCESS; if (!err_code) { - if ((err_code = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL)) || - (err_code = pthread_mutex_init(&mutex->mutex_handle, &attr))) { - + err_code = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL); + if (!err_code) { + err_code = pthread_mutex_init(&mutex->mutex_handle, &attr); + } + if (err_code) { return_code = aws_private_convert_and_raise_error_code(err_code); } pthread_mutexattr_destroy(&attr); diff --git a/contrib/restricted/aws/aws-c-common/source/posix/system_info.c b/contrib/restricted/aws/aws-c-common/source/posix/system_info.c index 54bb502d80a..b5a0edc87e9 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/system_info.c +++ b/contrib/restricted/aws/aws-c-common/source/posix/system_info.c @@ -50,10 +50,10 @@ size_t aws_system_info_processor_count(void) { uint16_t aws_get_cpu_group_count(void) { if (g_numa_num_configured_nodes_ptr) { - return (uint16_t)g_numa_num_configured_nodes_ptr(); + return aws_max_u16(1, (uint16_t)g_numa_num_configured_nodes_ptr()); } - return 1u; + return 1U; } size_t aws_get_cpu_count_for_group(uint16_t group_idx) { @@ -242,7 +242,7 @@ int s_parse_symbol(const char *symbol, void *addr, struct aws_stack_frame_info * if 
(function_len >= (sizeof(frame->function) - 1)) { function_len = sizeof(frame->function) - 1; } - strncpy(frame->function, function_start, function_end - function_start); + strncpy(frame->function, function_start, function_len); /* find base addr for library/exe */ Dl_info addr_info; diff --git a/contrib/restricted/aws/aws-c-common/source/posix/system_resource_utils.c b/contrib/restricted/aws/aws-c-common/source/posix/system_resource_utils.c new file mode 100644 index 00000000000..68165072b2f --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/source/posix/system_resource_utils.c @@ -0,0 +1,32 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include <aws/common/system_resource_util.h> + +#include <sys/resource.h> + +int aws_init_memory_usage_for_current_process(struct aws_memory_usage_stats *memory_usage) { + AWS_PRECONDITION(memory_usage); + + AWS_ZERO_STRUCT(*memory_usage); + struct rusage usage; + + if (getrusage(RUSAGE_SELF, &usage)) { + return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + } + +#if defined(AWS_OS_APPLE) + /* + * For some reason Apple switched to reporting this in bytes instead of KB + * around MacOS 10.6. + * Make it back to KB. Result might be slightly off due to rounding. 
+ */ + memory_usage->maxrss = usage.ru_maxrss / 1024; +#else + memory_usage->maxrss = usage.ru_maxrss; +#endif + memory_usage->page_faults = usage.ru_majflt; + return AWS_OP_SUCCESS; +} diff --git a/contrib/restricted/aws/aws-c-common/source/posix/thread.c b/contrib/restricted/aws/aws-c-common/source/posix/thread.c index 57d48aa9c70..af7fac84cf9 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/thread.c +++ b/contrib/restricted/aws/aws-c-common/source/posix/thread.c @@ -4,7 +4,7 @@ */ #if !defined(__MACH__) -# define _GNU_SOURCE +# define _GNU_SOURCE /* NOLINT(bugprone-reserved-identifier) */ #endif #include <aws/common/clock.h> @@ -296,9 +296,9 @@ int aws_thread_launch( attr_return = pthread_attr_setaffinity_np(attributes_ptr, sizeof(cpuset), &cpuset); if (attr_return) { - AWS_LOGF_ERROR( + AWS_LOGF_WARN( AWS_LS_COMMON_THREAD, - "id=%p: pthread_attr_setaffinity_np() failed with %d.", + "id=%p: pthread_attr_setaffinity_np() failed with %d. Continuing without cpu affinity", (void *)thread, attr_return); goto cleanup; @@ -382,7 +382,20 @@ cleanup: if (attr_return) { s_thread_wrapper_destroy(wrapper); - + if (options && options->cpu_id >= 0) { + /* + * `pthread_create` can fail with an `EINVAL` error or `EDEADLK` on freebasd if the `cpu_id` is + * restricted/invalid. Since the pinning to a particular `cpu_id` is supposed to be best-effort, try to + * launch a thread again without pinning to a specific cpu_id. 
+ */ + AWS_LOGF_INFO( + AWS_LS_COMMON_THREAD, + "id=%p: Attempting to launch the thread again without pinning to a cpu_id", + (void *)thread); + struct aws_thread_options new_options = *options; + new_options.cpu_id = -1; + return aws_thread_launch(thread, func, arg, &new_options); + } switch (attr_return) { case EINVAL: return aws_raise_error(AWS_ERROR_THREAD_INVALID_SETTINGS); diff --git a/contrib/restricted/aws/aws-c-common/source/priority_queue.c b/contrib/restricted/aws/aws-c-common/source/priority_queue.c index 86a91feb3ae..fcea718bf2c 100644 --- a/contrib/restricted/aws/aws-c-common/source/priority_queue.c +++ b/contrib/restricted/aws/aws-c-common/source/priority_queue.c @@ -400,3 +400,27 @@ size_t aws_priority_queue_size(const struct aws_priority_queue *queue) { size_t aws_priority_queue_capacity(const struct aws_priority_queue *queue) { return aws_array_list_capacity(&queue->container); } + +void aws_priority_queue_clear(struct aws_priority_queue *queue) { + AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); + size_t backpointer_count = aws_array_list_length(&queue->backpointers); + for (size_t i = 0; i < backpointer_count; ++i) { + struct aws_priority_queue_node *node = NULL; + aws_array_list_get_at(&queue->backpointers, &node, i); + if (node != NULL) { + node->current_index = SIZE_MAX; + } + } + + aws_array_list_clear(&queue->backpointers); + aws_array_list_clear(&queue->container); + AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); +} + +void aws_priority_queue_node_init(struct aws_priority_queue_node *node) { + node->current_index = SIZE_MAX; +} + +bool aws_priority_queue_node_is_in_queue(const struct aws_priority_queue_node *node) { + return node->current_index != SIZE_MAX; +} diff --git a/contrib/restricted/aws/aws-c-common/source/process_common.c b/contrib/restricted/aws/aws-c-common/source/process_common.c index ef432374b85..24a25094baf 100644 --- a/contrib/restricted/aws/aws-c-common/source/process_common.c +++ 
b/contrib/restricted/aws/aws-c-common/source/process_common.c @@ -9,7 +9,7 @@ #include <stdio.h> #include <sys/types.h> -#define MAX_BUFFER_SIZE (2048) +enum { MAX_BUFFER_SIZE = 2048 }; int aws_run_command_result_init(struct aws_allocator *allocator, struct aws_run_command_result *result) { if (!allocator || !result) { diff --git a/contrib/restricted/aws/aws-c-common/source/string.c b/contrib/restricted/aws/aws-c-common/source/string.c index a3d2c204ed8..2fd79123057 100644 --- a/contrib/restricted/aws/aws-c-common/source/string.c +++ b/contrib/restricted/aws/aws-c-common/source/string.c @@ -189,11 +189,8 @@ struct aws_string *aws_string_new_from_c_str(struct aws_allocator *allocator, co struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, const uint8_t *bytes, size_t len) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(AWS_MEM_IS_READABLE(bytes, len)); - size_t malloc_size; - if (aws_add_size_checked(sizeof(struct aws_string) + 1, len, &malloc_size)) { - return NULL; - } - struct aws_string *str = aws_mem_acquire(allocator, malloc_size); + + struct aws_string *str = aws_mem_acquire(allocator, offsetof(struct aws_string, bytes[len + 1])); if (!str) { return NULL; } diff --git a/contrib/restricted/aws/aws-c-common/source/system_info.c b/contrib/restricted/aws/aws-c-common/source/system_info.c new file mode 100644 index 00000000000..4b721f63a65 --- /dev/null +++ b/contrib/restricted/aws/aws-c-common/source/system_info.c @@ -0,0 +1,80 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/common/private/system_info_priv.h> + +#include <aws/common/logging.h> + +void s_destroy_env(void *arg) { + struct aws_system_environment *env = arg; + + if (env) { + aws_system_environment_destroy_platform_impl(env); + aws_mem_release(env->allocator, env); + } +} + +struct aws_system_environment *aws_system_environment_load(struct aws_allocator *allocator) { + struct aws_system_environment *env = aws_mem_calloc(allocator, 1, sizeof(struct aws_system_environment)); + env->allocator = allocator; + aws_ref_count_init(&env->ref_count, env, s_destroy_env); + + if (aws_system_environment_load_platform_impl(env)) { + AWS_LOGF_ERROR( + AWS_LS_COMMON_GENERAL, + "id=%p: failed to load system environment with error %s.", + (void *)env, + aws_error_debug_str(aws_last_error())); + goto error; + } + + AWS_LOGF_TRACE( + AWS_LS_COMMON_GENERAL, + "id=%p: virtualization vendor detected as \"" PRInSTR "\"", + (void *)env, + AWS_BYTE_CURSOR_PRI(aws_system_environment_get_virtualization_vendor(env))); + AWS_LOGF_TRACE( + AWS_LS_COMMON_GENERAL, + "id=%p: virtualization product name detected as \"" PRInSTR " \"", + (void *)env, + AWS_BYTE_CURSOR_PRI(aws_system_environment_get_virtualization_vendor(env))); + + env->os = aws_get_platform_build_os(); + env->cpu_count = aws_system_info_processor_count(); + env->cpu_group_count = aws_get_cpu_group_count(); + + return env; +error: + s_destroy_env(env); + return NULL; +} + +struct aws_system_environment *aws_system_environment_acquire(struct aws_system_environment *env) { + aws_ref_count_acquire(&env->ref_count); + return env; +} + +void aws_system_environment_release(struct aws_system_environment *env) { + aws_ref_count_release(&env->ref_count); +} + +struct aws_byte_cursor aws_system_environment_get_virtualization_vendor(const struct aws_system_environment *env) { + struct aws_byte_cursor vendor_string = aws_byte_cursor_from_buf(&env->virtualization_vendor); + return aws_byte_cursor_trim_pred(&vendor_string, 
aws_char_is_space); +} + +struct aws_byte_cursor aws_system_environment_get_virtualization_product_name( + const struct aws_system_environment *env) { + struct aws_byte_cursor product_name_str = aws_byte_cursor_from_buf(&env->product_name); + return aws_byte_cursor_trim_pred(&product_name_str, aws_char_is_space); +} + +size_t aws_system_environment_get_processor_count(struct aws_system_environment *env) { + return env->cpu_count; +} + +AWS_COMMON_API +size_t aws_system_environment_get_cpu_group_count(const struct aws_system_environment *env) { + return env->cpu_group_count; +} diff --git a/contrib/restricted/aws/aws-c-common/source/task_scheduler.c b/contrib/restricted/aws/aws-c-common/source/task_scheduler.c index 4467b124937..bca150e39bf 100644 --- a/contrib/restricted/aws/aws-c-common/source/task_scheduler.c +++ b/contrib/restricted/aws/aws-c-common/source/task_scheduler.c @@ -135,7 +135,7 @@ void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struc (void *)task, task->type_tag); - task->priority_queue_node.current_index = SIZE_MAX; + aws_priority_queue_node_init(&task->priority_queue_node); aws_linked_list_node_reset(&task->node); task->timestamp = 0; @@ -161,7 +161,7 @@ void aws_task_scheduler_schedule_future( task->timestamp = time_to_run; - task->priority_queue_node.current_index = SIZE_MAX; + aws_priority_queue_node_init(&task->priority_queue_node); aws_linked_list_node_reset(&task->node); int err = aws_priority_queue_push_ref(&scheduler->timed_queue, &task, &task->priority_queue_node); if (AWS_UNLIKELY(err)) { diff --git a/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c b/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c index 7999344b7b9..4948e3c5da2 100644 --- a/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c +++ b/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c @@ -32,7 +32,7 @@ struct cancellation_node { static void s_destroy_callback(void *arg) { struct aws_thread_scheduler 
*scheduler = arg; - aws_atomic_store_int(&scheduler->should_exit, 1u); + aws_atomic_store_int(&scheduler->should_exit, 1U); aws_condition_variable_notify_all(&scheduler->thread_data.c_var); aws_thread_join(&scheduler->thread); aws_task_scheduler_clean_up(&scheduler->scheduler); @@ -138,7 +138,7 @@ struct aws_thread_scheduler *aws_thread_scheduler_new( } scheduler->allocator = allocator; - aws_atomic_init_int(&scheduler->should_exit, 0u); + aws_atomic_init_int(&scheduler->should_exit, 0U); aws_ref_count_init(&scheduler->ref_count, scheduler, s_destroy_callback); aws_linked_list_init(&scheduler->thread_data.scheduling_queue); aws_linked_list_init(&scheduler->thread_data.cancel_queue); @@ -182,7 +182,7 @@ void aws_thread_scheduler_schedule_future( aws_condition_variable_notify_one(&scheduler->thread_data.c_var); } void aws_thread_scheduler_schedule_now(struct aws_thread_scheduler *scheduler, struct aws_task *task) { - aws_thread_scheduler_schedule_future(scheduler, task, 0u); + aws_thread_scheduler_schedule_future(scheduler, task, 0U); } void aws_thread_scheduler_cancel_task(struct aws_thread_scheduler *scheduler, struct aws_task *task) { diff --git a/contrib/restricted/aws/aws-c-common/source/thread_shared.c b/contrib/restricted/aws/aws-c-common/source/thread_shared.c index cbceb428eda..bd479a16e0d 100644 --- a/contrib/restricted/aws/aws-c-common/source/thread_shared.c +++ b/contrib/restricted/aws/aws-c-common/source/thread_shared.c @@ -106,7 +106,7 @@ int aws_thread_join_all_managed(void) { aws_condition_variable_wait_for_pred( &s_managed_thread_signal, &s_managed_thread_lock, - wait_ns, + (int64_t)wait_ns, s_one_or_fewer_managed_threads_unjoined, NULL); } else { diff --git a/contrib/restricted/aws/aws-c-common/source/uri.c b/contrib/restricted/aws/aws-c-common/source/uri.c index cefe57e8768..f9ecc9d77ed 100644 --- a/contrib/restricted/aws/aws-c-common/source/uri.c +++ b/contrib/restricted/aws/aws-c-common/source/uri.c @@ -30,6 +30,9 @@ struct uri_parser { enum 
parser_state state; }; +/* strlen of UINT32_MAX "4294967295" is 10, plus 1 for '\0' */ +#define PORT_BUFFER_SIZE 11 + typedef void(parse_fn)(struct uri_parser *parser, struct aws_byte_cursor *str); static void s_parse_scheme(struct uri_parser *parser, struct aws_byte_cursor *str); @@ -101,8 +104,7 @@ int aws_uri_init_from_builder_options( buffer_size += options->host_name.len; if (options->port) { - /* max strlen of a 16 bit integer is 5 */ - buffer_size += 6; + buffer_size += PORT_BUFFER_SIZE; } buffer_size += options->path.len; @@ -142,8 +144,8 @@ int aws_uri_init_from_builder_options( struct aws_byte_cursor port_app = aws_byte_cursor_from_c_str(":"); if (options->port) { aws_byte_buf_append(&uri->uri_str, &port_app); - char port_arr[6] = {0}; - snprintf(port_arr, sizeof(port_arr), "%" PRIu16, options->port); + char port_arr[PORT_BUFFER_SIZE] = {0}; + snprintf(port_arr, sizeof(port_arr), "%" PRIu32, options->port); struct aws_byte_cursor port_csr = aws_byte_cursor_from_c_str(port_arr); aws_byte_buf_append(&uri->uri_str, &port_csr); } @@ -208,11 +210,11 @@ const struct aws_byte_cursor *aws_uri_host_name(const struct aws_uri *uri) { return &uri->host_name; } -uint16_t aws_uri_port(const struct aws_uri *uri) { +uint32_t aws_uri_port(const struct aws_uri *uri) { return uri->port; } -bool aws_uri_query_string_next_param(const struct aws_uri *uri, struct aws_uri_param *param) { +bool aws_query_string_next_param(struct aws_byte_cursor query_string, struct aws_uri_param *param) { /* If param is zeroed, then this is the first run. 
*/ bool first_run = param->value.ptr == NULL; @@ -230,7 +232,7 @@ bool aws_uri_query_string_next_param(const struct aws_uri *uri, struct aws_uri_p /* The do-while is to skip over any empty substrings */ do { - if (!aws_byte_cursor_next_split(&uri->query_string, '&', &substr)) { + if (!aws_byte_cursor_next_split(&query_string, '&', &substr)) { /* no more splits, done iterating */ return false; } @@ -252,10 +254,10 @@ bool aws_uri_query_string_next_param(const struct aws_uri *uri, struct aws_uri_p return true; } -int aws_uri_query_string_params(const struct aws_uri *uri, struct aws_array_list *out_params) { +int aws_query_string_params(struct aws_byte_cursor query_string_cursor, struct aws_array_list *out_params) { struct aws_uri_param param; AWS_ZERO_STRUCT(param); - while (aws_uri_query_string_next_param(uri, ¶m)) { + while (aws_query_string_next_param(query_string_cursor, ¶m)) { if (aws_array_list_push_back(out_params, ¶m)) { return AWS_OP_ERR; } @@ -264,6 +266,14 @@ int aws_uri_query_string_params(const struct aws_uri *uri, struct aws_array_list return AWS_OP_SUCCESS; } +bool aws_uri_query_string_next_param(const struct aws_uri *uri, struct aws_uri_param *param) { + return aws_query_string_next_param(uri->query_string, param); +} + +int aws_uri_query_string_params(const struct aws_uri *uri, struct aws_array_list *out_params) { + return aws_query_string_params(uri->query_string, out_params); +} + static void s_parse_scheme(struct uri_parser *parser, struct aws_byte_cursor *str) { const uint8_t *location_of_colon = memchr(str->ptr, ':', str->len); @@ -377,31 +387,23 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor size_t port_len = authority_parse_csr.len - parser->uri->host_name.len - 1; port_delim += 1; - for (size_t i = 0; i < port_len; ++i) { - if (!aws_isdigit(port_delim[i])) { + + uint64_t port_u64 = 0; + if (port_len > 0) { + struct aws_byte_cursor port_cursor = aws_byte_cursor_from_array(port_delim, port_len); + if 
(aws_byte_cursor_utf8_parse_u64(port_cursor, &port_u64)) { + parser->state = ERROR; + aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + return; + } + if (port_u64 > UINT32_MAX) { parser->state = ERROR; aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); return; } } - if (port_len > 5) { - parser->state = ERROR; - aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); - return; - } - - /* why 6? because the port is a 16-bit unsigned integer*/ - char atoi_buf[6] = {0}; - memcpy(atoi_buf, port_delim, port_len); - int port_int = atoi(atoi_buf); - if (port_int > UINT16_MAX) { - parser->state = ERROR; - aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); - return; - } - - parser->uri->port = (uint16_t)port_int; + parser->uri->port = (uint32_t)port_u64; } } diff --git a/contrib/restricted/aws/aws-c-common/source/xml_parser.c b/contrib/restricted/aws/aws-c-common/source/xml_parser.c index ac238cdfaf3..e1b5807401d 100644 --- a/contrib/restricted/aws/aws-c-common/source/xml_parser.c +++ b/contrib/restricted/aws/aws-c-common/source/xml_parser.c @@ -21,45 +21,6 @@ struct cb_stack_data { void *user_data; }; -struct aws_xml_parser *aws_xml_parser_new( - struct aws_allocator *allocator, - const struct aws_xml_parser_options *options) { - - AWS_PRECONDITION(allocator); - AWS_PRECONDITION(options); - - struct aws_xml_parser *parser = aws_mem_calloc(allocator, 1, sizeof(struct aws_xml_parser)); - - if (parser == NULL) { - return NULL; - } - - parser->allocator = allocator; - parser->doc = options->doc; - - parser->max_depth = s_max_document_depth; - parser->error = AWS_OP_SUCCESS; - - if (options->max_depth) { - parser->max_depth = options->max_depth; - } - - if (aws_array_list_init_dynamic(&parser->callback_stack, allocator, 4, sizeof(struct cb_stack_data))) { - aws_mem_release(allocator, parser); - return NULL; - } - - return parser; -} - -void aws_xml_parser_destroy(struct aws_xml_parser *parser) { - AWS_PRECONDITION(parser); - - aws_array_list_clean_up(&parser->callback_stack); - - 
aws_mem_release(parser->allocator, parser); -} - int s_node_next_sibling(struct aws_xml_parser *parser); static bool s_double_quote_fn(uint8_t value) { @@ -90,14 +51,14 @@ static int s_load_node_decl( * we limit to 10 attributes, if this is exceeded we consider it invalid document. */ if (aws_byte_cursor_split_on_char(decl_body, ' ', &splits)) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + return aws_raise_error(AWS_ERROR_INVALID_XML); } size_t splits_count = aws_array_list_length(&splits); if (splits_count < 1) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + return aws_raise_error(AWS_ERROR_INVALID_XML); } aws_array_list_get_at(&splits, &node->name, 0); @@ -134,43 +95,43 @@ static int s_load_node_decl( return AWS_OP_SUCCESS; } -int aws_xml_parser_parse( - struct aws_xml_parser *parser, - aws_xml_parser_on_node_encountered_fn *on_node_encountered, - void *user_data) { - - AWS_PRECONDITION(parser); +int aws_xml_parse(struct aws_allocator *allocator, const struct aws_xml_parser_options *options) { - if (on_node_encountered == NULL) { - AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'on_node_encountered' argument for aws_xml_parser_parse is invalid."); - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return AWS_OP_ERR; - } + AWS_PRECONDITION(allocator); + AWS_PRECONDITION(options); + AWS_PRECONDITION(options->on_root_encountered); - aws_array_list_clear(&parser->callback_stack); + struct aws_xml_parser parser = { + .allocator = allocator, + .doc = options->doc, + .max_depth = options->max_depth ? options->max_depth : s_max_document_depth, + .error = AWS_OP_SUCCESS, + }; + aws_array_list_init_dynamic(&parser.callback_stack, allocator, 4, sizeof(struct cb_stack_data)); /* burn everything that precedes the actual xml nodes. 
*/ - while (parser->doc.len) { - const uint8_t *start = memchr(parser->doc.ptr, '<', parser->doc.len); + while (parser.doc.len) { + const uint8_t *start = memchr(parser.doc.ptr, '<', parser.doc.len); if (!start) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + parser.error = aws_raise_error(AWS_ERROR_INVALID_XML); + goto clean_up; } - const uint8_t *location = memchr(parser->doc.ptr, '>', parser->doc.len); - + const uint8_t *location = memchr(parser.doc.ptr, '>', parser.doc.len); if (!location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + parser.error = aws_raise_error(AWS_ERROR_INVALID_XML); + goto clean_up; } - aws_byte_cursor_advance(&parser->doc, start - parser->doc.ptr); + aws_byte_cursor_advance(&parser.doc, start - parser.doc.ptr); /* if these are preamble statements, burn them. otherwise don't seek at all * and assume it's just the doc with no preamble statements. */ - if (*(parser->doc.ptr + 1) == '?' || *(parser->doc.ptr + 1) == '!') { + if (*(parser.doc.ptr + 1) == '?' || *(parser.doc.ptr + 1) == '!') { /* nobody cares about the preamble */ - size_t advance = location - parser->doc.ptr + 1; - aws_byte_cursor_advance(&parser->doc, advance); + size_t advance = location - parser.doc.ptr + 1; + aws_byte_cursor_advance(&parser.doc, advance); } else { break; } @@ -178,12 +139,16 @@ int aws_xml_parser_parse( /* now we should be at the start of the actual document. 
*/ struct cb_stack_data stack_data = { - .cb = on_node_encountered, - .user_data = user_data, + .cb = options->on_root_encountered, + .user_data = options->user_data, }; - AWS_FATAL_ASSERT(!aws_array_list_push_back(&parser->callback_stack, &stack_data)); - return s_node_next_sibling(parser); + aws_array_list_push_back(&parser.callback_stack, &stack_data); + parser.error = s_node_next_sibling(&parser); + +clean_up: + aws_array_list_clean_up(&parser.callback_stack); + return parser.error; } int s_advance_to_closing_tag( @@ -205,13 +170,13 @@ int s_advance_to_closing_tag( if (closing_name_len > node->doc_at_body.len) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + parser->error = aws_raise_error(AWS_ERROR_INVALID_XML); return AWS_OP_ERR; } if (sizeof(name_close) < closing_name_len) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + parser->error = aws_raise_error(AWS_ERROR_INVALID_XML); return AWS_OP_ERR; } @@ -235,7 +200,7 @@ int s_advance_to_closing_tag( do { if (aws_byte_cursor_find_exact(&parser->doc, &to_find_close, &close_find_result)) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + return aws_raise_error(AWS_ERROR_INVALID_XML); } /* if we find an opening node with the same name, before the closing tag keep going. 
*/ @@ -267,29 +232,24 @@ int s_advance_to_closing_tag( return parser->error; } -int aws_xml_node_as_body(struct aws_xml_parser *parser, struct aws_xml_node *node, struct aws_byte_cursor *out_body) { - AWS_PRECONDITION(parser); +int aws_xml_node_as_body(struct aws_xml_node *node, struct aws_byte_cursor *out_body) { AWS_PRECONDITION(node); + AWS_FATAL_ASSERT(!node->processed && "XML node can be traversed, or read as body, but not both."); node->processed = true; - return s_advance_to_closing_tag(parser, node, out_body); + return s_advance_to_closing_tag(node->parser, node, out_body); } int aws_xml_node_traverse( - struct aws_xml_parser *parser, struct aws_xml_node *node, aws_xml_parser_on_node_encountered_fn *on_node_encountered, void *user_data) { - AWS_PRECONDITION(parser); AWS_PRECONDITION(node); + AWS_PRECONDITION(on_node_encountered); - if (on_node_encountered == NULL) { - AWS_LOGF_ERROR( - AWS_LS_COMMON_XML_PARSER, "Callback 'on_node_encountered' for aws_xml_node_traverse is invalid."); - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return AWS_OP_ERR; - } + struct aws_xml_parser *parser = node->parser; + AWS_FATAL_ASSERT(!node->processed && "XML node can be traversed, or read as body, but not both."); node->processed = true; struct cb_stack_data stack_data = { .cb = on_node_encountered, @@ -298,32 +258,30 @@ int aws_xml_node_traverse( size_t doc_depth = aws_array_list_length(&parser->callback_stack); if (doc_depth >= parser->max_depth) { - AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); - return AWS_OP_ERR; + AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document exceeds max depth."); + aws_raise_error(AWS_ERROR_INVALID_XML); + goto error; } - if (aws_array_list_push_back(&parser->callback_stack, &stack_data)) { - AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); - return 
AWS_OP_ERR; - } + aws_array_list_push_back(&parser->callback_stack, &stack_data); /* look for the next node at the current level. do this until we encounter the parent node's * closing tag. */ - while (!parser->stop_parsing && !parser->error) { + while (!parser->error) { const uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len); if (!next_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + aws_raise_error(AWS_ERROR_INVALID_XML); + goto error; } const uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len); if (!end_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + aws_raise_error(AWS_ERROR_INVALID_XML); + goto error; } bool parent_closed = false; @@ -343,6 +301,7 @@ int aws_xml_node_traverse( struct aws_byte_cursor decl_body = aws_byte_cursor_from_array(next_location + 1, node_name_len - 1); struct aws_xml_node next_node = { + .parser = parser, .doc_at_body = parser->doc, .processed = false, }; @@ -351,38 +310,29 @@ int aws_xml_node_traverse( return AWS_OP_ERR; } - if (!on_node_encountered(parser, &next_node, user_data)) { - parser->stop_parsing = true; - return parser->error; + if (on_node_encountered(&next_node, user_data)) { + goto error; } /* if the user simply returned while skipping the node altogether, go ahead and do the skip over. 
*/ - if (!parser->stop_parsing && !next_node.processed) { + if (!next_node.processed) { if (s_advance_to_closing_tag(parser, &next_node, NULL)) { - return AWS_OP_ERR; + goto error; } } } - if (parser->stop_parsing) { - return parser->error; - } - aws_array_list_pop_back(&parser->callback_stack); return parser->error; + +error: + parser->error = AWS_OP_ERR; + return parser->error; } -int aws_xml_node_get_name(const struct aws_xml_node *node, struct aws_byte_cursor *out_name) { +struct aws_byte_cursor aws_xml_node_get_name(const struct aws_xml_node *node) { AWS_PRECONDITION(node); - - if (out_name == NULL) { - AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'out_name' argument for aws_xml_node_get_name is invalid."); - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return AWS_OP_ERR; - } - - *out_name = node->name; - return AWS_OP_SUCCESS; + return node->name; } size_t aws_xml_node_get_num_attributes(const struct aws_xml_node *node) { @@ -390,19 +340,15 @@ size_t aws_xml_node_get_num_attributes(const struct aws_xml_node *node) { return aws_array_list_length(&node->attributes); } -int aws_xml_node_get_attribute( - const struct aws_xml_node *node, - size_t attribute_index, - struct aws_xml_attribute *out_attribute) { +struct aws_xml_attribute aws_xml_node_get_attribute(const struct aws_xml_node *node, size_t attribute_index) { AWS_PRECONDITION(node); - if (out_attribute == NULL) { - AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'out_attribute' argument for aws_xml_node_get_attribute is invalid."); - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return AWS_OP_ERR; + struct aws_xml_attribute attribute; + if (aws_array_list_get_at(&node->attributes, &attribute, attribute_index)) { + AWS_FATAL_ASSERT(0 && "Invalid XML attribute index"); } - return aws_array_list_get_at(&node->attributes, out_attribute, attribute_index); + return attribute; } /* advance the parser to the next sibling node.*/ @@ -420,7 +366,7 @@ int s_node_next_sibling(struct aws_xml_parser *parser) { if 
(!end_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); - return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); + return aws_raise_error(AWS_ERROR_INVALID_XML); } size_t node_name_len = end_location - next_location; @@ -429,6 +375,7 @@ int s_node_next_sibling(struct aws_xml_parser *parser) { struct aws_byte_cursor node_decl_body = aws_byte_cursor_from_array(next_location + 1, node_name_len - 1); struct aws_xml_node sibling_node = { + .parser = parser, .doc_at_body = parser->doc, .processed = false, }; @@ -442,7 +389,9 @@ int s_node_next_sibling(struct aws_xml_parser *parser) { aws_array_list_back(&parser->callback_stack, &stack_data); AWS_FATAL_ASSERT(stack_data.cb); - parser->stop_parsing = !stack_data.cb(parser, &sibling_node, stack_data.user_data); + if (stack_data.cb(&sibling_node, stack_data.user_data)) { + return AWS_OP_ERR; + } /* if the user simply returned while skipping the node altogether, go ahead and do the skip over. */ if (!sibling_node.processed) { diff --git a/contrib/restricted/aws/aws-c-common/ya.make b/contrib/restricted/aws/aws-c-common/ya.make index b3bc34dae32..d72e25b2fb4 100644 --- a/contrib/restricted/aws/aws-c-common/ya.make +++ b/contrib/restricted/aws/aws-c-common/ya.make @@ -3,6 +3,7 @@ LIBRARY() LICENSE( + "(GPL-2.0-only OR BSD-3-Clause)" AND Apache-2.0 AND BSD-3-Clause AND MIT AND @@ -11,9 +12,9 @@ LICENSE( LICENSE_TEXTS(.yandex_meta/licenses.list.txt) -VERSION(0.8.23) +VERSION(0.9.17) -ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-common/archive/v0.8.23.tar.gz) +ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-common/archive/v0.9.17.tar.gz) ADDINCL( GLOBAL contrib/restricted/aws/aws-c-common/generated/include @@ -30,6 +31,7 @@ CFLAGS( -DAWS_PTHREAD_SETNAME_TAKES_2ARGS -DCJSON_HIDE_SYMBOLS -DHAVE_SYSCONF + -DINTEL_NO_ITTNOTIFY_API ) IF (MUSL) @@ -56,8 +58,6 @@ ENDIF() IF (ARCH_X86_64) CFLAGS( - -DHAVE_MM256_EXTRACT_EPI64 - -DHAVE_AVX2_INTRINSICS -DUSE_SIMD_ENCODING ) ENDIF() @@ -81,9 +81,11 @@ 
SRCS( source/fifo_cache.c source/file.c source/hash_table.c + source/host_utils.c source/json.c source/lifo_cache.c source/linked_hash_table.c + source/linux/system_info.c source/log_channel.c source/log_formatter.c source/log_writer.c @@ -98,6 +100,7 @@ SRCS( source/ring_buffer.c source/statistics.c source/string.c + source/system_info.c source/task_scheduler.c source/thread_scheduler.c source/thread_shared.c @@ -108,7 +111,7 @@ SRCS( IF (ARCH_ARM) SRCS( - source/arch/arm/asm/cpuid.c + source/arch/arm/auxv/cpuid.c ) ELSEIF (ARCH_X86_64) SRCS( @@ -122,6 +125,7 @@ IF (NOT OS_WINDOWS) SRCS( source/posix/clock.c source/posix/condition_variable.c + source/posix/cross_process_lock.c source/posix/device_random.c source/posix/environment.c source/posix/file.c @@ -129,6 +133,7 @@ IF (NOT OS_WINDOWS) source/posix/process.c source/posix/rw_lock.c source/posix/system_info.c + source/posix/system_resource_utils.c source/posix/thread.c source/posix/time.c ) diff --git a/contrib/restricted/aws/aws-c-http/.yandex_meta/override.nix b/contrib/restricted/aws/aws-c-http/.yandex_meta/override.nix index e3d0ccc4c91..5088ed27063 100644 --- a/contrib/restricted/aws/aws-c-http/.yandex_meta/override.nix +++ b/contrib/restricted/aws/aws-c-http/.yandex_meta/override.nix @@ -1,10 +1,10 @@ pkgs: attrs: with pkgs; with attrs; rec { - version = "0.7.6"; + version = "0.8.1"; src = fetchFromGitHub { owner = "awslabs"; repo = "aws-c-http"; rev = "v${version}"; - hash = "sha256-pJGzGbIuz8UJkfmTQEZgXSOMuYixMezNZmgaRlcnmfg="; + hash = "sha256-S5ETVkdGTndt2GJBNL4DU5SycHAufsmN06xBDRMFVKo="; }; } diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h b/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h index e6362c1439e..031957ef0f1 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h @@ -8,8 +8,11 @@ #include <aws/http/http.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct 
aws_client_bootstrap; struct aws_socket_options; +struct aws_socket_endpoint; struct aws_tls_connection_options; struct aws_http2_setting; struct proxy_env_var_settings; @@ -269,7 +272,7 @@ struct aws_http_client_connection_options { /** * Required. */ - uint16_t port; + uint32_t port; /** * Required. @@ -305,6 +308,15 @@ struct aws_http_client_connection_options { const struct aws_http_connection_monitoring_options *monitoring_options; /** + * Optional (ignored if 0). + * After a request is fully sent, if the server does not begin responding within N milliseconds, + * then fail with AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT. + * This can be overridden per-request by aws_http_make_request_options.response_first_byte_timeout_ms. + * TODO: Only supported in HTTP/1.1 now, support it in HTTP/2 + */ + uint64_t response_first_byte_timeout_ms; + + /** * Set to true to manually manage the flow-control window of each stream. * * If false, the connection will maintain its flow-control windows such that @@ -398,6 +410,12 @@ struct aws_http_client_connection_options { * event loop group associated with the client bootstrap. */ struct aws_event_loop *requested_event_loop; + + /** + * Optional + * Host resolution override that allows the user to override DNS behavior for this particular connection. + */ + const struct aws_host_resolution_config *host_resolution_config; }; /* Predefined settings identifiers (RFC-7540 6.5.2) */ @@ -507,6 +525,12 @@ AWS_HTTP_API struct aws_channel *aws_http_connection_get_channel(struct aws_http_connection *connection); /** + * Returns the remote endpoint of the HTTP connection. + */ +AWS_HTTP_API +const struct aws_socket_endpoint *aws_http_connection_get_remote_endpoint(const struct aws_http_connection *connection); + +/** * Initialize an map copied from the *src map, which maps `struct aws_string *` to `enum aws_http_version`. 
*/ AWS_HTTP_API @@ -675,5 +699,6 @@ AWS_HTTP_API void aws_http2_connection_update_window(struct aws_http_connection *http2_connection, uint32_t increment_size); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_CONNECTION_H */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h b/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h index 4c02df9382a..70b1e77b82d 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h @@ -10,6 +10,8 @@ #include <aws/common/byte_buf.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_client_bootstrap; struct aws_http_connection; struct aws_http_connection_manager; @@ -78,7 +80,7 @@ struct aws_http_connection_manager_options { const struct aws_http_connection_monitoring_options *monitoring_options; struct aws_byte_cursor host; - uint16_t port; + uint32_t port; /** * Optional. @@ -190,5 +192,6 @@ void aws_http_connection_manager_fetch_metrics( struct aws_http_manager_metrics *out_metrics); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_CONNECTION_MANAGER_H */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/http.h b/contrib/restricted/aws/aws-c-http/include/aws/http/http.h index f02f09dc3e6..7532537d2f8 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/http.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/http.h @@ -10,6 +10,8 @@ #include <aws/http/exports.h> #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + #define AWS_C_HTTP_PACKAGE_ID 2 enum aws_http_errors { @@ -57,6 +59,7 @@ enum aws_http_errors { AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED, AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED, + AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT, AWS_ERROR_HTTP_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_HTTP_PACKAGE_ID) }; @@ -154,5 +157,6 @@ AWS_HTTP_API extern const struct aws_byte_cursor 
aws_http_scheme_http; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_scheme_https; AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_H */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h b/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h index c37da489aa6..bd78677598b 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h @@ -8,6 +8,8 @@ #include <aws/http/http.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_http2_stream_manager; struct aws_client_bootstrap; struct aws_http_connection; @@ -66,7 +68,7 @@ struct aws_http2_stream_manager_options { bool http2_prior_knowledge; struct aws_byte_cursor host; - uint16_t port; + uint32_t port; /** * Optional. @@ -212,4 +214,6 @@ void aws_http2_stream_manager_fetch_metrics( struct aws_http_manager_metrics *out_metrics); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL + #endif /* AWS_HTTP2_STREAM_MANAGER_H */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h index a97ab0daba9..fd9c915ab29 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h @@ -20,10 +20,10 @@ struct aws_http_make_request_options; struct aws_http_request_handler_options; struct aws_http_stream; -typedef int aws_client_bootstrap_new_socket_channel_fn(struct aws_socket_channel_bootstrap_options *options); - +/* vtable of functions that aws_http_connection uses to interact with external systems. 
+ * tests override the vtable to mock those systems */ struct aws_http_connection_system_vtable { - aws_client_bootstrap_new_socket_channel_fn *new_socket_channel; + int (*aws_client_bootstrap_new_socket_channel)(struct aws_socket_channel_bootstrap_options *options); }; struct aws_http_connection_vtable { @@ -103,7 +103,7 @@ struct aws_http_connection { union { struct aws_http_connection_client_data { - uint8_t delete_me; /* exists to prevent "empty struct" errors */ + uint64_t response_first_byte_timeout_ms; } client; struct aws_http_connection_server_data { @@ -133,6 +133,7 @@ struct aws_http_client_bootstrap { aws_http_on_client_connection_setup_fn *on_setup; aws_http_on_client_connection_shutdown_fn *on_shutdown; aws_http_proxy_request_transform_fn *proxy_request_transform; + uint64_t response_first_byte_timeout_ms; struct aws_http1_connection_options http1_options; struct aws_http2_connection_options http2_options; /* allocated with bootstrap */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h index 115ba661364..d5d183b82ed 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h @@ -12,28 +12,20 @@ struct aws_http_connection_manager; -typedef int(aws_http_connection_manager_create_connection_fn)(const struct aws_http_client_connection_options *options); -typedef void(aws_http_connection_manager_close_connection_fn)(struct aws_http_connection *connection); -typedef void(aws_http_connection_release_connection_fn)(struct aws_http_connection *connection); -typedef bool(aws_http_connection_is_connection_available_fn)(const struct aws_http_connection *connection); -typedef bool(aws_http_connection_manager_is_callers_thread_fn)(struct aws_channel *channel); -typedef 
struct aws_channel *(aws_http_connection_manager_connection_get_channel_fn)( - struct aws_http_connection *connection); -typedef enum aws_http_version(aws_http_connection_manager_connection_get_version_fn)( - const struct aws_http_connection *connection); - +/* vtable of functions that aws_http_connection_manager uses to interact with external systems. + * tests override the vtable to mock those systems */ struct aws_http_connection_manager_system_vtable { /* * Downstream http functions */ - aws_http_connection_manager_create_connection_fn *create_connection; - aws_http_connection_manager_close_connection_fn *close_connection; - aws_http_connection_release_connection_fn *release_connection; - aws_http_connection_is_connection_available_fn *is_connection_available; - aws_io_clock_fn *get_monotonic_time; - aws_http_connection_manager_is_callers_thread_fn *is_callers_thread; - aws_http_connection_manager_connection_get_channel_fn *connection_get_channel; - aws_http_connection_manager_connection_get_version_fn *connection_get_version; + int (*aws_http_client_connect)(const struct aws_http_client_connection_options *options); + void (*aws_http_connection_close)(struct aws_http_connection *connection); + void (*aws_http_connection_release)(struct aws_http_connection *connection); + bool (*aws_http_connection_new_requests_allowed)(const struct aws_http_connection *connection); + int (*aws_high_res_clock_get_ticks)(uint64_t *timestamp); + bool (*aws_channel_thread_is_callers_thread)(struct aws_channel *channel); + struct aws_channel *(*aws_http_connection_get_channel)(struct aws_http_connection *connection); + enum aws_http_version (*aws_http_connection_get_version)(const struct aws_http_connection *connection); }; AWS_HTTP_API diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h index df1446ec9b3..8b210c8b1dd 100644 --- 
a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h @@ -117,6 +117,7 @@ struct aws_h1_stream *aws_h1_stream_new_request( struct aws_h1_stream *aws_h1_stream_new_request_handler(const struct aws_http_request_handler_options *options); int aws_h1_stream_activate(struct aws_http_stream *stream); +void aws_h1_stream_cancel(struct aws_http_stream *stream, int error_code); int aws_h1_stream_send_response(struct aws_h1_stream *stream, struct aws_http_message *response); diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h index 6d42b831602..f2754ab754d 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h @@ -125,6 +125,7 @@ struct aws_h2_connection { uint64_t outgoing_timestamp_ns; /* Timestamp when connection has data to receive, which is when there is an active stream */ uint64_t incoming_timestamp_ns; + } thread_data; /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h index c47305b251e..946fece72b3 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h @@ -52,7 +52,7 @@ struct aws_http_proxy_config { struct aws_byte_buf host; - uint16_t port; + uint32_t port; struct aws_tls_connection_options *tls_options; @@ -97,7 +97,7 @@ struct aws_http_proxy_user_data { * Cached original connect options */ struct aws_string *original_host; - uint16_t original_port; + uint32_t original_port; void *original_user_data; struct aws_tls_connection_options 
*original_tls_options; struct aws_client_bootstrap *original_bootstrap; @@ -126,10 +126,16 @@ struct aws_http_proxy_user_data { struct aws_http_proxy_config *proxy_config; struct aws_event_loop *requested_event_loop; + + const struct aws_host_resolution_config *host_resolution_config; }; +/* vtable of functions that proxy uses to interact with external systems. + * tests override the vtable to mock those systems */ struct aws_http_proxy_system_vtable { - int (*setup_client_tls)(struct aws_channel_slot *right_of_slot, struct aws_tls_connection_options *tls_options); + int (*aws_channel_setup_client_tls)( + struct aws_channel_slot *right_of_slot, + struct aws_tls_connection_options *tls_options); }; AWS_EXTERN_C_BEGIN diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h index 9cd06e01c24..acc5d9dd7db 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h @@ -6,6 +6,7 @@ * SPDX-License-Identifier: Apache-2.0. 
*/ +#include <aws/common/task_scheduler.h> #include <aws/http/request_response.h> #include <aws/http/private/http_impl.h> @@ -16,6 +17,7 @@ struct aws_http_stream_vtable { void (*destroy)(struct aws_http_stream *stream); void (*update_window)(struct aws_http_stream *stream, size_t increment_size); int (*activate)(struct aws_http_stream *stream); + void (*cancel)(struct aws_http_stream *stream, int error_code); int (*http1_write_chunk)(struct aws_http_stream *http1_stream, const struct aws_http1_chunk_options *options); int (*http1_add_trailer)(struct aws_http_stream *http1_stream, const struct aws_http_headers *trailing_headers); @@ -43,15 +45,21 @@ struct aws_http_stream { aws_http_on_incoming_headers_fn *on_incoming_headers; aws_http_on_incoming_header_block_done_fn *on_incoming_header_block_done; aws_http_on_incoming_body_fn *on_incoming_body; + aws_http_on_stream_metrics_fn *on_metrics; aws_http_on_stream_complete_fn *on_complete; aws_http_on_stream_destroy_fn *on_destroy; struct aws_atomic_var refcount; enum aws_http_method request_method; + struct aws_http_stream_metrics metrics; union { struct aws_http_stream_client_data { int response_status; + uint64_t response_first_byte_timeout_ms; + /* Using aws_task instead of aws_channel_task because, currently, channel-tasks can't be canceled. 
+ * We only touch this from the connection's thread */ + struct aws_task response_first_byte_timeout_task; } client; struct aws_http_stream_server_data { struct aws_byte_cursor request_method_str; diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h b/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h index cd4c92107df..28fdfb045b5 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h @@ -11,6 +11,8 @@ #include <aws/http/request_response.h> #include <aws/http/status_code.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_http_client_connection_options; struct aws_http_connection_manager_options; @@ -112,7 +114,7 @@ struct aws_http_proxy_options { /** * Port to make the proxy connection to */ - uint16_t port; + uint32_t port; /** * Optional. @@ -566,5 +568,6 @@ AWS_HTTP_API int aws_http_proxy_new_socket_channel( const struct aws_http_proxy_options *proxy_options); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_PROXY_STRATEGY_H */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h b/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h index a4ff6da9477..73c1900508f 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h @@ -8,6 +8,10 @@ #include <aws/http/http.h> +#include <aws/io/future.h> + +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_http_connection; struct aws_input_stream; @@ -135,7 +139,8 @@ typedef void(aws_http_message_transform_fn)( * This is always invoked on the HTTP connection's event-loop thread. * * Return AWS_OP_SUCCESS to continue processing the stream. - * Return AWS_OP_ERR to indicate failure and cancel the stream. + * Return aws_raise_error(E) to indicate failure and cancel the stream. + * The error you raise will be reflected in the error_code passed to the on_complete callback. 
*/ typedef int(aws_http_on_incoming_headers_fn)( struct aws_http_stream *stream, @@ -149,7 +154,8 @@ typedef int(aws_http_on_incoming_headers_fn)( * This is always invoked on the HTTP connection's event-loop thread. * * Return AWS_OP_SUCCESS to continue processing the stream. - * Return AWS_OP_ERR to indicate failure and cancel the stream. + * Return aws_raise_error(E) to indicate failure and cancel the stream. + * The error you raise will be reflected in the error_code passed to the on_complete callback. */ typedef int(aws_http_on_incoming_header_block_done_fn)( struct aws_http_stream *stream, @@ -167,7 +173,8 @@ typedef int(aws_http_on_incoming_header_block_done_fn)( * aws_http_stream_update_window(). * * Return AWS_OP_SUCCESS to continue processing the stream. - * Return AWS_OP_ERR to indicate failure and cancel the stream. + * Return aws_raise_error(E) to indicate failure and cancel the stream. + * The error you raise will be reflected in the error_code passed to the on_complete callback. */ typedef int( aws_http_on_incoming_body_fn)(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data); @@ -177,24 +184,66 @@ typedef int( * This is always invoked on the HTTP connection's event-loop thread. * * Return AWS_OP_SUCCESS to continue processing the stream. - * Return AWS_OP_ERR to indicate failure and cancel the stream. + * Return aws_raise_error(E) to indicate failure and cancel the stream. + * The error you raise will be reflected in the error_code passed to the on_complete callback. */ typedef int(aws_http_on_incoming_request_done_fn)(struct aws_http_stream *stream, void *user_data); /** - * Invoked when request/response stream is completely destroyed. - * This may be invoked synchronously when aws_http_stream_release() is called. - * This is invoked even if the stream is never activated. 
+ * Invoked when a request/response stream is complete, whether successful or unsuccessful + * This is always invoked on the HTTP connection's event-loop thread. + * This will not be invoked if the stream is never activated. */ typedef void(aws_http_on_stream_complete_fn)(struct aws_http_stream *stream, int error_code, void *user_data); /** * Invoked when request/response stream destroy completely. * This can be invoked within the same thead who release the refcount on http stream. + * This is invoked even if the stream is never activated. */ typedef void(aws_http_on_stream_destroy_fn)(void *user_data); /** + * Tracing metrics for aws_http_stream. + * Data maybe not be available if the data of stream was never sent/received before it completes. + */ +struct aws_http_stream_metrics { + /* The time stamp when the request started to be encoded. -1 means data not available. Timestamp + * are from `aws_high_res_clock_get_ticks` */ + int64_t send_start_timestamp_ns; + /* The time stamp when the request finished to be encoded. -1 means data not available. + * Timestamp are from `aws_high_res_clock_get_ticks` */ + int64_t send_end_timestamp_ns; + /* The time duration for the request from start encoding to finish encoding (send_end_timestamp_ns - + * send_start_timestamp_ns). -1 means data not available. */ + int64_t sending_duration_ns; + + /* The time stamp when the response started to be received from the network channel. -1 means data not available. + * Timestamp are from `aws_high_res_clock_get_ticks` */ + int64_t receive_start_timestamp_ns; + /* The time stamp when the response finished to be received from the network channel. -1 means data not available. + * Timestamp are from `aws_high_res_clock_get_ticks` */ + int64_t receive_end_timestamp_ns; + /* The time duration for the request from start receiving to finish receiving. receive_end_timestamp_ns - + * receive_start_timestamp_ns. -1 means data not available. 
*/ + int64_t receiving_duration_ns; + + /* The stream-id on the connection when this stream was activated. */ + uint32_t stream_id; +}; + +/** + * Invoked right before request/response stream is complete to report the tracing metrics for aws_http_stream. + * This may be invoked synchronously when aws_http_stream_release() is called. + * This is invoked even if the stream is never activated. + * See `aws_http_stream_metrics` for details. + */ +typedef void(aws_http_on_stream_metrics_fn)( + struct aws_http_stream *stream, + const struct aws_http_stream_metrics *metrics, + void *user_data); + +/** * Options for creating a stream which sends a request from the client and receives a response from the server. */ struct aws_http_make_request_options { @@ -235,6 +284,13 @@ struct aws_http_make_request_options { aws_http_on_incoming_body_fn *on_response_body; /** + * Invoked right before stream is complete, whether successful or unsuccessful + * Optional. + * See `aws_http_on_stream_metrics_fn` + */ + aws_http_on_stream_metrics_fn *on_metrics; + + /** * Invoked when request/response stream is complete, whether successful or unsuccessful * Optional. * See `aws_http_on_stream_complete_fn`. @@ -249,6 +305,16 @@ struct aws_http_make_request_options { * when data has been supplied via `aws_http2_stream_write_data` */ bool http2_use_manual_data_writes; + + /** + * Optional (ignored if 0). + * After a request is fully sent, if the server does not begin responding within N milliseconds, then fail with + * AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT. + * It override the connection level settings, when the request completes, the + * original monitoring options will be applied back to the connection. 
+ * TODO: Only supported in HTTP/1.1 now, support it in HTTP/2 + */ + uint64_t response_first_byte_timeout_ms; }; struct aws_http_request_handler_options { @@ -807,6 +873,11 @@ AWS_HTTP_API void aws_http_message_set_body_stream(struct aws_http_message *message, struct aws_input_stream *body_stream); /** + * aws_future<aws_http_message*> + */ +AWS_FUTURE_T_POINTER_WITH_RELEASE_DECLARATION(aws_future_http_message, struct aws_http_message, AWS_HTTP_API) + +/** * Submit a chunk of data to be sent on an HTTP/1.1 stream. * The stream must have specified "chunked" in a "transfer-encoding" header. * For client streams, activate() must be called before any chunks are submitted. @@ -973,6 +1044,12 @@ struct aws_http_stream *aws_http_stream_new_server_request_handler( const struct aws_http_request_handler_options *options); /** + * Acquire refcount on the stream to prevent it from being cleaned up until it is released. + */ +AWS_HTTP_API +struct aws_http_stream *aws_http_stream_acquire(struct aws_http_stream *stream); + +/** * Users must release the stream when they are done with it, or its memory will never be cleaned up. * This will not cancel the stream, its callbacks will still fire if the stream is still in progress. * @@ -1038,6 +1115,18 @@ AWS_HTTP_API uint32_t aws_http_stream_get_id(const struct aws_http_stream *stream); /** + * Cancel the stream in flight. + * For HTTP/1.1 streams, it's equivalent to closing the connection. + * For HTTP/2 streams, it's equivalent to calling reset on the stream with `AWS_HTTP2_ERR_CANCEL`. + * + * the stream will complete with the error code provided, unless the stream is + * already completing for other reasons, or the stream is not activated, + * in which case this call will have no impact. + */ +AWS_HTTP_API +void aws_http_stream_cancel(struct aws_http_stream *stream, int error_code); + +/** * Reset the HTTP/2 stream (HTTP/2 only). 
* Note that if the stream closes before this async call is fully processed, the RST_STREAM frame will not be sent. * @@ -1068,5 +1157,6 @@ AWS_HTTP_API int aws_http2_stream_get_sent_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_REQUEST_RESPONSE_H */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/server.h b/contrib/restricted/aws/aws-c-http/include/aws/http/server.h index 0e1be3d8c01..03893355d8a 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/server.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/server.h @@ -8,6 +8,8 @@ #include <aws/http/http.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_http_connection; struct aws_server_bootstrap; struct aws_socket_options; @@ -193,6 +195,13 @@ int aws_http_connection_configure_server( AWS_HTTP_API bool aws_http_connection_is_server(const struct aws_http_connection *connection); +/** + * Returns the local listener endpoint of the HTTP server. Only valid as long as the server remains valid. 
+ */ +AWS_HTTP_API +const struct aws_socket_endpoint *aws_http_server_get_listener_endpoint(const struct aws_http_server *server); + AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_SERVER_H */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h b/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h index ecc8c2700ab..7f3790242e0 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h @@ -10,6 +10,8 @@ #include <aws/common/statistics.h> +AWS_PUSH_SANE_WARNING_LEVEL + enum aws_crt_http_statistics_category { AWSCRT_STAT_CAT_HTTP1_CHANNEL = AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID), AWSCRT_STAT_CAT_HTTP2_CHANNEL, @@ -71,5 +73,6 @@ AWS_HTTP_API void aws_crt_statistics_http2_channel_reset(struct aws_crt_statistics_http2_channel *stats); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_STATISTICS_H */ diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h b/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h index 6f85cafa810..39703b4e2bb 100644 --- a/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h +++ b/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h @@ -7,6 +7,8 @@ #include <aws/http/http.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_http_header; struct aws_http_message; @@ -183,7 +185,7 @@ struct aws_websocket_client_connection_options { * Optional. * Defaults to 443 if tls_options is present, 80 if it is not. */ - uint16_t port; + uint32_t port; /** * Required. @@ -280,6 +282,12 @@ struct aws_websocket_client_connection_options { * a single thread. */ struct aws_event_loop *requested_event_loop; + + /** + * Optional + * Host resolution override that allows the user to override DNS behavior for this particular connection. 
+ */ + const struct aws_host_resolution_config *host_resolution_config; }; /** @@ -479,5 +487,6 @@ struct aws_http_message *aws_http_message_new_websocket_handshake_request( struct aws_byte_cursor host); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_WEBSOCKET_H */ diff --git a/contrib/restricted/aws/aws-c-http/source/connection.c b/contrib/restricted/aws/aws-c-http/source/connection.c index f020823dcf1..a53aebe1912 100644 --- a/contrib/restricted/aws/aws-c-http/source/connection.c +++ b/contrib/restricted/aws/aws-c-http/source/connection.c @@ -18,6 +18,7 @@ #include <aws/io/channel_bootstrap.h> #include <aws/io/logging.h> #include <aws/io/socket.h> +#include <aws/io/socket_channel_handler.h> #include <aws/io/tls_channel_handler.h> #ifdef _MSC_VER @@ -26,7 +27,7 @@ #endif static struct aws_http_connection_system_vtable s_default_system_vtable = { - .new_socket_channel = aws_client_bootstrap_new_socket_channel, + .aws_client_bootstrap_new_socket_channel = aws_client_bootstrap_new_socket_channel, }; static const struct aws_http_connection_system_vtable *s_system_vtable_ptr = &s_default_system_vtable; @@ -366,6 +367,16 @@ struct aws_channel *aws_http_connection_get_channel(struct aws_http_connection * return connection->channel_slot->channel; } +const struct aws_socket_endpoint *aws_http_connection_get_remote_endpoint( + const struct aws_http_connection *connection) { + AWS_ASSERT(connection); + struct aws_channel *channel = connection->channel_slot->channel; + /* The first slot for an HTTP connection is always socket */ + struct aws_channel_slot *socket_slot = aws_channel_get_first_slot(channel); + const struct aws_socket *socket = aws_socket_handler_get_socket(socket_slot->handler); + return &socket->remote_endpoint; +} + int aws_http_alpn_map_init(struct aws_allocator *allocator, struct aws_hash_table *map) { AWS_ASSERT(allocator); AWS_ASSERT(map); @@ -495,7 +506,7 @@ static void s_server_bootstrap_on_accept_channel_setup( if (put_err) { 
AWS_LOGF_ERROR( AWS_LS_HTTP_SERVER, - "%p: %s:%d: Failed to store connection object, error %d (%s).", + "%p: %s:%u: Failed to store connection object, error %d (%s).", (void *)server, server->socket->local_endpoint.address, server->socket->local_endpoint.port, @@ -508,7 +519,7 @@ static void s_server_bootstrap_on_accept_channel_setup( /* Tell user of successful connection. */ AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, - "id=%p: " PRInSTR " server connection established at %p %s:%d.", + "id=%p: " PRInSTR " server connection established at %p %s:%u.", (void *)connection, AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->http_version)), (void *)server, @@ -690,7 +701,7 @@ struct aws_http_server *aws_http_server_new(const struct aws_http_server_options AWS_LOGF_INFO( AWS_LS_HTTP_SERVER, - "%p %s:%d: Server setup complete, listening for incoming connections.", + "%p %s:%u: Server setup complete, listening for incoming connections.", (void *)server, server->socket->local_endpoint.address, server->socket->local_endpoint.port); @@ -740,7 +751,7 @@ void aws_http_server_release(struct aws_http_server *server) { * s_server_bootstrap_on_server_listener_destroy will be invoked, clean up of the server will be there */ AWS_LOGF_INFO( AWS_LS_HTTP_SERVER, - "%p %s:%d: Shutting down the server.", + "%p %s:%u: Shutting down the server.", (void *)server, server->socket->local_endpoint.address, server->socket->local_endpoint.port); @@ -751,6 +762,12 @@ void aws_http_server_release(struct aws_http_server *server) { * clean up will be called from eventloop */ } +const struct aws_socket_endpoint *aws_http_server_get_listener_endpoint(const struct aws_http_server *server) { + AWS_FATAL_ASSERT(server); + + return &server->socket->local_endpoint; +} + /* At this point, the channel bootstrapper has established a connection to the server and set up a channel. * Now we need to create the aws_http_connection and insert it into the channel as a channel-handler. 
*/ static void s_client_bootstrap_on_channel_setup( @@ -823,6 +840,8 @@ static void s_client_bootstrap_on_channel_setup( } http_bootstrap->connection->proxy_request_transform = http_bootstrap->proxy_request_transform; + http_bootstrap->connection->client_data->response_first_byte_timeout_ms = + http_bootstrap->response_first_byte_timeout_ms; AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, @@ -1062,6 +1081,7 @@ int aws_http_client_connect_internal( http_bootstrap->proxy_request_transform = proxy_request_transform; http_bootstrap->http1_options = *options.http1_options; http_bootstrap->http2_options = *options.http2_options; + http_bootstrap->response_first_byte_timeout_ms = options.response_first_byte_timeout_ms; /* keep a copy of the settings array if it's not NULL */ if (options.http2_options->num_initial_settings > 0) { @@ -1085,9 +1105,9 @@ int aws_http_client_connect_internal( AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, - "static: attempting to initialize a new client channel to %s:%d", + "static: attempting to initialize a new client channel to %s:%u", aws_string_c_str(host_name), - (int)options.port); + options.port); struct aws_socket_channel_bootstrap_options channel_options = { .bootstrap = options.bootstrap, @@ -1100,9 +1120,10 @@ int aws_http_client_connect_internal( .enable_read_back_pressure = options.manual_window_management, .user_data = http_bootstrap, .requested_event_loop = options.requested_event_loop, + .host_resolution_override_config = options.host_resolution_config, }; - err = s_system_vtable_ptr->new_socket_channel(&channel_options); + err = s_system_vtable_ptr->aws_client_bootstrap_new_socket_channel(&channel_options); if (err) { AWS_LOGF_ERROR( diff --git a/contrib/restricted/aws/aws-c-http/source/connection_manager.c b/contrib/restricted/aws/aws-c-http/source/connection_manager.c index 30eda61778f..195a7d6caf6 100644 --- a/contrib/restricted/aws/aws-c-http/source/connection_manager.c +++ b/contrib/restricted/aws/aws-c-http/source/connection_manager.c 
@@ -43,22 +43,22 @@ struct aws_idle_connection { * System vtable to use under normal circumstances */ static struct aws_http_connection_manager_system_vtable s_default_system_vtable = { - .create_connection = aws_http_client_connect, - .release_connection = aws_http_connection_release, - .close_connection = aws_http_connection_close, - .is_connection_available = aws_http_connection_new_requests_allowed, - .get_monotonic_time = aws_high_res_clock_get_ticks, - .is_callers_thread = aws_channel_thread_is_callers_thread, - .connection_get_channel = aws_http_connection_get_channel, - .connection_get_version = aws_http_connection_get_version, + .aws_http_client_connect = aws_http_client_connect, + .aws_http_connection_release = aws_http_connection_release, + .aws_http_connection_close = aws_http_connection_close, + .aws_http_connection_new_requests_allowed = aws_http_connection_new_requests_allowed, + .aws_high_res_clock_get_ticks = aws_high_res_clock_get_ticks, + .aws_channel_thread_is_callers_thread = aws_channel_thread_is_callers_thread, + .aws_http_connection_get_channel = aws_http_connection_get_channel, + .aws_http_connection_get_version = aws_http_connection_get_version, }; const struct aws_http_connection_manager_system_vtable *g_aws_http_connection_manager_default_system_vtable_ptr = &s_default_system_vtable; bool aws_http_connection_manager_system_vtable_is_valid(const struct aws_http_connection_manager_system_vtable *table) { - return table->create_connection && table->close_connection && table->release_connection && - table->is_connection_available; + return table->aws_http_client_connect && table->aws_http_connection_close && table->aws_http_connection_release && + table->aws_http_connection_new_requests_allowed; } enum aws_http_connection_manager_state_type { AWS_HCMST_UNINITIALIZED, AWS_HCMST_READY, AWS_HCMST_SHUTTING_DOWN }; @@ -236,7 +236,7 @@ struct aws_http_connection_manager { struct aws_string *host; struct proxy_env_var_settings proxy_ev_settings; 
struct aws_tls_connection_options *proxy_ev_tls_options; - uint16_t port; + uint32_t port; /* * HTTP/2 specific. */ @@ -433,13 +433,13 @@ static void s_aws_http_connection_manager_complete_acquisitions( if (pending_acquisition->error_code == AWS_OP_SUCCESS) { - struct aws_channel *channel = - pending_acquisition->manager->system_vtable->connection_get_channel(pending_acquisition->connection); + struct aws_channel *channel = pending_acquisition->manager->system_vtable->aws_http_connection_get_channel( + pending_acquisition->connection); AWS_PRECONDITION(channel); /* For some workloads, going ahead and moving the connection callback to the connection's thread is a * substantial performance improvement so let's do that */ - if (!pending_acquisition->manager->system_vtable->is_callers_thread(channel)) { + if (!pending_acquisition->manager->system_vtable->aws_channel_thread_is_callers_thread(channel)) { aws_channel_task_init( &pending_acquisition->acquisition_task, s_connection_acquisition_task, @@ -776,7 +776,7 @@ static void s_schedule_connection_culling(struct aws_http_connection_manager *ma * culling interval from now. 
*/ uint64_t now = 0; - manager->system_vtable->get_monotonic_time(&now); + manager->system_vtable->aws_high_res_clock_get_ticks(&now); cull_task_time = now + aws_timestamp_convert( manager->max_connection_idle_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); @@ -1024,7 +1024,7 @@ static int s_aws_http_connection_manager_new_connection(struct aws_http_connecti options.proxy_options = &proxy_options; } - if (manager->system_vtable->create_connection(&options)) { + if (manager->system_vtable->aws_http_client_connect(&options)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: http connection creation failed with error code %d(%s)", @@ -1061,7 +1061,7 @@ static void s_aws_http_connection_manager_execute_transaction(struct aws_connect "id=%p: Releasing connection (id=%p)", (void *)manager, (void *)idle_connection->connection); - manager->system_vtable->release_connection(idle_connection->connection); + manager->system_vtable->aws_http_connection_release(idle_connection->connection); aws_mem_release(idle_connection->allocator, idle_connection); } @@ -1071,7 +1071,7 @@ static void s_aws_http_connection_manager_execute_transaction(struct aws_connect "id=%p: Releasing connection (id=%p)", (void *)manager, (void *)work->connection_to_release); - manager->system_vtable->release_connection(work->connection_to_release); + manager->system_vtable->aws_http_connection_release(work->connection_to_release); } /* @@ -1194,7 +1194,7 @@ static int s_idle_connection(struct aws_http_connection_manager *manager, struct idle_connection->connection = connection; uint64_t idle_start_timestamp = 0; - if (manager->system_vtable->get_monotonic_time(&idle_start_timestamp)) { + if (manager->system_vtable->aws_high_res_clock_get_ticks(&idle_start_timestamp)) { goto on_error; } @@ -1223,7 +1223,7 @@ int aws_http_connection_manager_release_connection( s_aws_connection_management_transaction_init(&work, manager); int result = AWS_OP_ERR; - bool should_release_connection = 
!manager->system_vtable->is_connection_available(connection); + bool should_release_connection = !manager->system_vtable->aws_http_connection_new_requests_allowed(connection); AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, @@ -1413,7 +1413,8 @@ static void s_aws_http_connection_manager_on_connection_setup( s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_OPEN_CONNECTION, 1); } - if (connection != NULL && manager->system_vtable->connection_get_version(connection) == AWS_HTTP_VERSION_2) { + if (connection != NULL && + manager->system_vtable->aws_http_connection_get_version(connection) == AWS_HTTP_VERSION_2) { /* If the manager is shutting down, we will still wait for the settings, since we don't have map for connections */ ++manager->pending_settings_count; @@ -1492,7 +1493,7 @@ static void s_cull_idle_connections(struct aws_http_connection_manager *manager) } uint64_t now = 0; - if (manager->system_vtable->get_monotonic_time(&now)) { + if (manager->system_vtable->aws_high_res_clock_get_ticks(&now)) { return; } diff --git a/contrib/restricted/aws/aws-c-http/source/h1_connection.c b/contrib/restricted/aws/aws-c-http/source/h1_connection.c index 3532bb80d94..903cf038144 100644 --- a/contrib/restricted/aws/aws-c-http/source/h1_connection.c +++ b/contrib/restricted/aws/aws-c-http/source/h1_connection.c @@ -11,6 +11,7 @@ #include <aws/http/private/h1_stream.h> #include <aws/http/private/request_response_impl.h> #include <aws/http/status_code.h> +#include <aws/io/event_loop.h> #include <aws/io/logging.h> #include <inttypes.h> @@ -371,6 +372,7 @@ int aws_h1_stream_activate(struct aws_http_stream *stream) { /* connection keeps activated stream alive until stream completes */ aws_atomic_fetch_add(&stream->refcount, 1); + stream->metrics.stream_id = stream->id; if (should_schedule_task) { AWS_LOGF_TRACE( @@ -386,6 +388,34 @@ int aws_h1_stream_activate(struct aws_http_stream *stream) { return AWS_OP_SUCCESS; } +void aws_h1_stream_cancel(struct aws_http_stream 
*stream, int error_code) { + struct aws_h1_stream *h1_stream = AWS_CONTAINER_OF(stream, struct aws_h1_stream, base); + struct aws_http_connection *base_connection = stream->owning_connection; + struct aws_h1_connection *connection = AWS_CONTAINER_OF(base_connection, struct aws_h1_connection, base); + + { /* BEGIN CRITICAL SECTION */ + aws_h1_connection_lock_synced_data(connection); + if (h1_stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE || + connection->synced_data.is_open == false) { + /* Not active, nothing to cancel. */ + aws_h1_connection_unlock_synced_data(connection); + AWS_LOGF_DEBUG(AWS_LS_HTTP_STREAM, "id=%p: Stream not active, nothing to cancel.", (void *)stream); + return; + } + + aws_h1_connection_unlock_synced_data(connection); + } /* END CRITICAL SECTION */ + AWS_LOGF_INFO( + AWS_LS_HTTP_CONNECTION, + "id=%p: Connection shutting down due to stream=%p cancelled with error code %d (%s).", + (void *)&connection->base, + (void *)stream, + error_code, + aws_error_name(error_code)); + + s_stop(connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, error_code); +} + struct aws_http_stream *s_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { @@ -534,6 +564,7 @@ static int s_aws_http1_switch_protocols(struct aws_h1_connection *connection) { static void s_stream_complete(struct aws_h1_stream *stream, int error_code) { struct aws_h1_connection *connection = AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h1_connection, base); + AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); /* * If this is the end of a successful CONNECT request, mark ourselves as pass-through since the proxy layer @@ -546,6 +577,14 @@ static void s_stream_complete(struct aws_h1_stream *stream, int error_code) { } } + if (stream->base.client_data && stream->base.client_data->response_first_byte_timeout_task.fn != NULL) { + /* 
There is an outstanding response timeout task, but stream completed, we can cancel it now. We are + * safe to do it as we always on connection thread to schedule the task or cancel it */ + struct aws_event_loop *connection_loop = aws_channel_get_event_loop(connection->base.channel_slot->channel); + /* The task will be zeroed out within the call */ + aws_event_loop_cancel_task(connection_loop, &stream->base.client_data->response_first_byte_timeout_task); + } + if (error_code != AWS_ERROR_SUCCESS) { if (stream->base.client_data && stream->is_incoming_message_done) { /* As a request that finished receiving the response, we ignore error and @@ -633,6 +672,10 @@ static void s_stream_complete(struct aws_h1_stream *stream, int error_code) { aws_h1_chunk_complete_and_destroy(chunk, &stream->base, AWS_ERROR_HTTP_STREAM_HAS_COMPLETED); } + if (stream->base.on_metrics) { + stream->base.on_metrics(&stream->base, &stream->base.metrics, stream->base.user_data); + } + /* Invoke callback and clean up stream. 
*/ if (stream->base.on_complete) { stream->base.on_complete(&stream->base, error_code, stream->base.user_data); @@ -716,6 +759,87 @@ static void s_client_update_incoming_stream_ptr(struct aws_h1_connection *connec s_set_incoming_stream_ptr(connection, desired); } +static void s_http_stream_response_first_byte_timeout_task( + struct aws_task *task, + void *arg, + enum aws_task_status status) { + (void)task; + struct aws_h1_stream *stream = arg; + struct aws_http_connection *connection_base = stream->base.owning_connection; + /* zero-out task to indicate that it's no longer scheduled */ + AWS_ZERO_STRUCT(stream->base.client_data->response_first_byte_timeout_task); + + if (status == AWS_TASK_STATUS_CANCELED) { + return; + } + + struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base); + /* Timeout happened, close the connection */ + uint64_t response_first_byte_timeout_ms = stream->base.client_data->response_first_byte_timeout_ms == 0 + ? connection_base->client_data->response_first_byte_timeout_ms + : stream->base.client_data->response_first_byte_timeout_ms; + AWS_LOGF_INFO( + AWS_LS_HTTP_CONNECTION, + "id=%p: Closing connection as timeout after request sent to the first byte received happened. " + "response_first_byte_timeout_ms is %" PRIu64 ".", + (void *)connection_base, + response_first_byte_timeout_ms); + + /* Don't stop reading/writing immediately, let that happen naturally during the channel shutdown process. 
*/ + s_stop( + connection, + false /*stop_reading*/, + false /*stop_writing*/, + true /*schedule_shutdown*/, + AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); +} + +static void s_set_outgoing_message_done(struct aws_h1_stream *stream) { + struct aws_http_connection *connection = stream->base.owning_connection; + struct aws_channel *channel = aws_http_connection_get_channel(connection); + AWS_ASSERT(aws_channel_thread_is_callers_thread(channel)); + + if (stream->is_outgoing_message_done) { + /* Already did the job */ + return; + } + + stream->is_outgoing_message_done = true; + AWS_ASSERT(stream->base.metrics.send_end_timestamp_ns == -1); + aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.send_end_timestamp_ns); + AWS_ASSERT(stream->base.metrics.send_start_timestamp_ns != -1); + AWS_ASSERT(stream->base.metrics.send_end_timestamp_ns >= stream->base.metrics.send_start_timestamp_ns); + stream->base.metrics.sending_duration_ns = + stream->base.metrics.send_end_timestamp_ns - stream->base.metrics.send_start_timestamp_ns; + if (stream->base.metrics.receive_start_timestamp_ns == -1) { + /* We haven't receive any message, schedule the response timeout task */ + + uint64_t response_first_byte_timeout_ms = 0; + if (stream->base.client_data != NULL && connection->client_data != NULL) { + response_first_byte_timeout_ms = stream->base.client_data->response_first_byte_timeout_ms == 0 + ? connection->client_data->response_first_byte_timeout_ms + : stream->base.client_data->response_first_byte_timeout_ms; + } + if (response_first_byte_timeout_ms != 0) { + /* The task should not be initialized before. 
*/ + AWS_ASSERT(stream->base.client_data->response_first_byte_timeout_task.fn == NULL); + aws_task_init( + &stream->base.client_data->response_first_byte_timeout_task, + s_http_stream_response_first_byte_timeout_task, + stream, + "http_stream_response_first_byte_timeout_task"); + uint64_t now_ns = 0; + aws_channel_current_clock_time(channel, &now_ns); + struct aws_event_loop *connection_loop = aws_channel_get_event_loop(channel); + aws_event_loop_schedule_task_future( + connection_loop, + &stream->base.client_data->response_first_byte_timeout_task, + now_ns + aws_timestamp_convert( + response_first_byte_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); + } + } +} + /** * If necessary, update `outgoing_stream` so it is pointing at a stream * with data to send, or NULL if all streams are done sending data. @@ -730,7 +854,7 @@ static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct aws_h1_connecti /* If current stream is done sending data... */ if (current && !aws_h1_encoder_is_message_in_progress(&connection->thread_data.encoder)) { - current->is_outgoing_message_done = true; + s_set_outgoing_message_done(current); /* RFC-7230 section 6.6: Tear-down. 
* If this was the final stream, don't allows any further streams to be sent */ @@ -801,9 +925,13 @@ static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct aws_h1_connecti s_set_outgoing_stream_ptr(connection, current); if (current) { + AWS_ASSERT(current->base.metrics.send_start_timestamp_ns == -1); + aws_high_res_clock_get_ticks((uint64_t *)¤t->base.metrics.send_start_timestamp_ns); + err = aws_h1_encoder_start_message( &connection->thread_data.encoder, ¤t->encoder_message, ¤t->base); (void)err; + AWS_ASSERT(connection->thread_data.encoder.state == AWS_H1_ENCODER_STATE_INIT); AWS_ASSERT(!err); } @@ -1109,7 +1237,7 @@ static int s_decoder_on_header(const struct aws_h1_decoded_header *header, void AWS_LS_HTTP_STREAM, "id=%p: Received 'Connection: close' header, no more request data will be sent.", (void *)&incoming_stream->base); - incoming_stream->is_outgoing_message_done = true; + s_set_outgoing_message_done(incoming_stream); } /* Stop writing right now. * Shutdown will be scheduled after we finishing parsing the response */ @@ -1270,6 +1398,13 @@ static int s_decoder_on_done(void *user_data) { /* Otherwise the incoming stream is finished decoding and we will update it if needed */ incoming_stream->is_incoming_message_done = true; + aws_high_res_clock_get_ticks((uint64_t *)&incoming_stream->base.metrics.receive_end_timestamp_ns); + AWS_ASSERT(incoming_stream->base.metrics.receive_start_timestamp_ns != -1); + AWS_ASSERT( + incoming_stream->base.metrics.receive_end_timestamp_ns >= + incoming_stream->base.metrics.receive_start_timestamp_ns); + incoming_stream->base.metrics.receiving_duration_ns = incoming_stream->base.metrics.receive_end_timestamp_ns - + incoming_stream->base.metrics.receive_start_timestamp_ns; /* RFC-7230 section 6.6 * After reading the final message, the connection must not read any more */ @@ -1822,6 +1957,20 @@ static int s_try_process_next_stream_read_message(struct aws_h1_connection *conn bool body_headers_ignored = 
incoming_stream->base.request_method == AWS_HTTP_METHOD_HEAD; aws_h1_decoder_set_body_headers_ignored(connection->thread_data.incoming_stream_decoder, body_headers_ignored); + if (incoming_stream->base.metrics.receive_start_timestamp_ns == -1) { + /* That's the first time for the stream receives any message */ + aws_high_res_clock_get_ticks((uint64_t *)&incoming_stream->base.metrics.receive_start_timestamp_ns); + if (incoming_stream->base.client_data && + incoming_stream->base.client_data->response_first_byte_timeout_task.fn != NULL) { + /* There is an outstanding response timeout task, as we already received the data, we can cancel it now. We + * are safe to do it as we always on connection thread to schedule the task or cancel it */ + struct aws_event_loop *connection_loop = aws_channel_get_event_loop(connection->base.channel_slot->channel); + /* The task will be zeroed out within the call */ + aws_event_loop_cancel_task( + connection_loop, &incoming_stream->base.client_data->response_first_byte_timeout_task); + } + } + /* As decoder runs, it invokes the internal s_decoder_X callbacks, which in turn invoke user callbacks. * The decoder will stop once it hits the end of the request/response OR the end of the message data. 
*/ if (aws_h1_decode(connection->thread_data.incoming_stream_decoder, &message_cursor)) { diff --git a/contrib/restricted/aws/aws-c-http/source/h1_decoder.c b/contrib/restricted/aws/aws-c-http/source/h1_decoder.c index 68e5aa224ae..c615d09a658 100644 --- a/contrib/restricted/aws/aws-c-http/source/h1_decoder.c +++ b/contrib/restricted/aws/aws-c-http/source/h1_decoder.c @@ -301,7 +301,7 @@ static int s_linestate_chunk_size(struct aws_h1_decoder *decoder, struct aws_byt decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } int err = aws_byte_cursor_utf8_parse_u64_hex(size, &decoder->chunk_size); diff --git a/contrib/restricted/aws/aws-c-http/source/h1_encoder.c b/contrib/restricted/aws/aws-c-http/source/h1_encoder.c index 1899d2f4025..277dce94633 100644 --- a/contrib/restricted/aws/aws-c-http/source/h1_encoder.c +++ b/contrib/restricted/aws/aws-c-http/source/h1_encoder.c @@ -403,10 +403,7 @@ int aws_h1_encoder_message_init_from_response( goto error; } - err = aws_byte_buf_init(&message->outgoing_head_buf, allocator, head_total_len); - if (err) { - return AWS_OP_ERR; - } + aws_byte_buf_init(&message->outgoing_head_buf, allocator, head_total_len); bool wrote_all = true; @@ -489,9 +486,9 @@ static size_t s_calculate_chunk_line_size(const struct aws_http1_chunk_options * size_t chunk_line_size = MAX_ASCII_HEX_CHUNK_STR_SIZE + CRLF_SIZE; for (size_t i = 0; i < options->num_extensions; ++i) { struct aws_http1_chunk_extension *chunk_extension = options->extensions + i; - chunk_line_size += sizeof(';'); + chunk_line_size += 1 /* ; */; chunk_line_size += chunk_extension->key.len; - chunk_line_size += sizeof('='); + chunk_line_size += 1 /* = */; chunk_line_size += chunk_extension->value.len; } return chunk_line_size; diff --git a/contrib/restricted/aws/aws-c-http/source/h1_stream.c b/contrib/restricted/aws/aws-c-http/source/h1_stream.c index a5d2f4782b4..ef8f086bfca 100644 --- 
a/contrib/restricted/aws/aws-c-http/source/h1_stream.c +++ b/contrib/restricted/aws/aws-c-http/source/h1_stream.c @@ -329,6 +329,7 @@ static const struct aws_http_stream_vtable s_stream_vtable = { .destroy = s_stream_destroy, .update_window = s_stream_update_window, .activate = aws_h1_stream_activate, + .cancel = aws_h1_stream_cancel, .http1_write_chunk = s_stream_write_chunk, .http1_add_trailer = s_stream_add_trailer, .http2_reset_stream = NULL, @@ -361,6 +362,12 @@ static struct aws_h1_stream *s_stream_new_common( stream->base.on_incoming_body = on_incoming_body; stream->base.on_complete = on_complete; stream->base.on_destroy = on_destroy; + stream->base.metrics.send_start_timestamp_ns = -1; + stream->base.metrics.send_end_timestamp_ns = -1; + stream->base.metrics.sending_duration_ns = -1; + stream->base.metrics.receive_start_timestamp_ns = -1; + stream->base.metrics.receive_end_timestamp_ns = -1; + stream->base.metrics.receiving_duration_ns = -1; aws_channel_task_init( &stream->cross_thread_work_task, s_stream_cross_thread_work_task, stream, "http1_stream_cross_thread_work"); @@ -401,6 +408,8 @@ struct aws_h1_stream *aws_h1_stream_new_request( stream->base.client_data = &stream->base.client_or_server_data.client; stream->base.client_data->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN; + stream->base.client_data->response_first_byte_timeout_ms = options->response_first_byte_timeout_ms; + stream->base.on_metrics = options->on_metrics; /* Validate request and cache info that the encoder will eventually need */ if (aws_h1_encoder_message_init_from_request( diff --git a/contrib/restricted/aws/aws-c-http/source/h2_connection.c b/contrib/restricted/aws/aws-c-http/source/h2_connection.c index 15ea192f8ab..f27c005f38f 100644 --- a/contrib/restricted/aws/aws-c-http/source/h2_connection.c +++ b/contrib/restricted/aws/aws-c-http/source/h2_connection.c @@ -1531,7 +1531,7 @@ static struct aws_h2err s_decoder_on_settings_ack(void *userdata) { } 
connection->thread_data.settings_self[settings_array[i].id] = settings_array[i].value; } - /* invoke the change settings compeleted user callback */ + /* invoke the change settings completed user callback */ if (pending_settings->on_completed) { pending_settings->on_completed(&connection->base, AWS_ERROR_SUCCESS, pending_settings->user_data); } @@ -2057,6 +2057,7 @@ int aws_h2_stream_activate(struct aws_http_stream *stream) { /* connection keeps activated stream alive until stream completes */ aws_atomic_fetch_add(&stream->refcount, 1); + stream->metrics.stream_id = stream->id; if (!was_cross_thread_work_scheduled) { CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task"); diff --git a/contrib/restricted/aws/aws-c-http/source/h2_stream.c b/contrib/restricted/aws/aws-c-http/source/h2_stream.c index 85232db0066..6d8336bfd3d 100644 --- a/contrib/restricted/aws/aws-c-http/source/h2_stream.c +++ b/contrib/restricted/aws/aws-c-http/source/h2_stream.c @@ -5,6 +5,7 @@ #include <aws/http/private/h2_stream.h> +#include <aws/common/clock.h> #include <aws/http/private/h2_connection.h> #include <aws/http/private/strutil.h> #include <aws/http/status_code.h> @@ -26,12 +27,17 @@ static int s_stream_write_data( static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static struct aws_h2err s_send_rst_and_close_stream(struct aws_h2_stream *stream, struct aws_h2err stream_error); -static int s_stream_reset_stream_internal(struct aws_http_stream *stream_base, struct aws_h2err stream_error); +static int s_stream_reset_stream_internal( + struct aws_http_stream *stream_base, + struct aws_h2err stream_error, + bool cancelling); +static void s_stream_cancel(struct aws_http_stream *stream, int error_code); struct aws_http_stream_vtable s_h2_stream_vtable = { .destroy = s_stream_destroy, .update_window = s_stream_update_window, .activate = aws_h2_stream_activate, + .cancel = s_stream_cancel, .http1_write_chunk = NULL, 
.http2_reset_stream = s_stream_reset_stream, .http2_get_received_error_code = s_stream_get_received_error_code, @@ -240,10 +246,17 @@ struct aws_h2_stream *aws_h2_stream_new_request( stream->base.on_incoming_headers = options->on_response_headers; stream->base.on_incoming_header_block_done = options->on_response_header_block_done; stream->base.on_incoming_body = options->on_response_body; + stream->base.on_metrics = options->on_metrics; stream->base.on_complete = options->on_complete; stream->base.on_destroy = options->on_destroy; stream->base.client_data = &stream->base.client_or_server_data.client; stream->base.client_data->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN; + stream->base.metrics.send_start_timestamp_ns = -1; + stream->base.metrics.send_end_timestamp_ns = -1; + stream->base.metrics.sending_duration_ns = -1; + stream->base.metrics.receive_start_timestamp_ns = -1; + stream->base.metrics.receive_end_timestamp_ns = -1; + stream->base.metrics.receiving_duration_ns = -1; aws_linked_list_init(&stream->thread_data.outgoing_writes); aws_linked_list_init(&stream->synced_data.pending_write_list); @@ -446,6 +459,9 @@ void aws_h2_stream_complete(struct aws_h2_stream *stream, int error_code) { s_h2_stream_destroy_pending_writes(stream); /* Invoke callback */ + if (stream->base.on_metrics) { + stream->base.on_metrics(&stream->base, &stream->base.metrics, stream->base.user_data); + } if (stream->base.on_complete) { stream->base.on_complete(&stream->base, error_code, stream->base.user_data); } @@ -515,12 +531,16 @@ static void s_stream_update_window(struct aws_http_stream *stream_base, size_t i .h2_code = AWS_HTTP2_ERR_INTERNAL_ERROR, }; /* Only when stream is not initialized reset will fail. So, we can assert it to be succeed. 
*/ - AWS_FATAL_ASSERT(s_stream_reset_stream_internal(stream_base, stream_error) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT( + s_stream_reset_stream_internal(stream_base, stream_error, false /*cancelling*/) == AWS_OP_SUCCESS); } return; } -static int s_stream_reset_stream_internal(struct aws_http_stream *stream_base, struct aws_h2err stream_error) { +static int s_stream_reset_stream_internal( + struct aws_http_stream *stream_base, + struct aws_h2err stream_error, + bool cancelling) { struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base); struct aws_h2_connection *connection = s_get_h2_connection(stream); @@ -542,21 +562,25 @@ static int s_stream_reset_stream_internal(struct aws_http_stream *stream_base, s } /* END CRITICAL SECTION */ if (stream_is_init) { + if (cancelling) { + /* Not an error if we are just cancelling. */ + AWS_LOGF_DEBUG(AWS_LS_HTTP_STREAM, "id=%p: Stream not in process, nothing to cancel.", (void *)stream); + return AWS_OP_SUCCESS; + } AWS_H2_STREAM_LOG( ERROR, stream, "Reset stream failed. Stream is in initialized state, please activate the stream first."); return aws_raise_error(AWS_ERROR_INVALID_STATE); } + if (reset_called) { + AWS_H2_STREAM_LOG(DEBUG, stream, "Reset stream ignored. Reset stream has been called already."); + } + if (cross_thread_work_should_schedule) { AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task"); /* increment the refcount of stream to keep it alive until the task runs */ aws_atomic_fetch_add(&stream->base.refcount, 1); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task); - return AWS_OP_SUCCESS; } - if (reset_called) { - AWS_H2_STREAM_LOG(DEBUG, stream, "Reset stream ignored. 
Reset stream has been called already."); - } - return AWS_OP_SUCCESS; } @@ -572,7 +596,16 @@ static int s_stream_reset_stream(struct aws_http_stream *stream_base, uint32_t h (void *)stream_base, aws_http2_error_code_to_str(http2_error), http2_error); - return s_stream_reset_stream_internal(stream_base, stream_error); + return s_stream_reset_stream_internal(stream_base, stream_error, false /*cancelling*/); +} + +void s_stream_cancel(struct aws_http_stream *stream_base, int error_code) { + struct aws_h2err stream_error = { + .aws_code = error_code, + .h2_code = AWS_HTTP2_ERR_CANCEL, + }; + s_stream_reset_stream_internal(stream_base, stream_error, true /*cancelling*/); + return; } static int s_stream_get_received_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error) { @@ -706,7 +739,8 @@ int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_ AWS_H2_STREAM_LOGF(ERROR, stream, "Failed to create HEADERS frame: %s", aws_error_name(aws_last_error())); goto error; } - + AWS_ASSERT(stream->base.metrics.send_start_timestamp_ns == -1); + aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.send_start_timestamp_ns); /* Initialize the flow-control window size */ stream->thread_data.window_size_peer = connection->thread_data.settings_peer[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; @@ -721,6 +755,11 @@ int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_ /* If stream has no body, then HEADERS frame marks the end of outgoing data */ stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL; AWS_H2_STREAM_LOG(TRACE, stream, "Sending HEADERS with END_STREAM. State -> HALF_CLOSED_LOCAL"); + /* There is no further frames to be sent, now is the end timestamp of sending. 
*/ + AWS_ASSERT(stream->base.metrics.send_end_timestamp_ns == -1); + aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.send_end_timestamp_ns); + stream->base.metrics.sending_duration_ns = + stream->base.metrics.send_end_timestamp_ns - stream->base.metrics.send_start_timestamp_ns; } if (s_h2_stream_has_outgoing_writes(stream)) { @@ -798,6 +837,11 @@ int aws_h2_stream_encode_data_frame( */ if (input_stream_complete && ends_stream) { /* Done sending data. No more data will be sent. */ + AWS_ASSERT(stream->base.metrics.send_end_timestamp_ns == -1); + aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.send_end_timestamp_ns); + stream->base.metrics.sending_duration_ns = + stream->base.metrics.send_end_timestamp_ns - stream->base.metrics.send_start_timestamp_ns; + if (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE) { /* Both sides have sent END_STREAM */ stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED; @@ -841,6 +885,7 @@ struct aws_h2err aws_h2_stream_on_decoder_headers_begin(struct aws_h2_stream *st if (aws_h2err_failed(stream_err)) { return s_send_rst_and_close_stream(stream, stream_err); } + aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.receive_start_timestamp_ns); return AWS_H2ERR_SUCCESS; } @@ -1150,6 +1195,13 @@ struct aws_h2err aws_h2_stream_on_decoder_end_stream(struct aws_h2_stream *strea * an actual frame type. It's a flag on DATA or HEADERS frames, and we * already checked the legality of those frames in their respective callbacks. 
*/ + AWS_ASSERT(stream->base.metrics.receive_start_timestamp_ns != -1); + AWS_ASSERT(stream->base.metrics.receive_end_timestamp_ns == -1); + aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.receive_end_timestamp_ns); + AWS_ASSERT(stream->base.metrics.receive_end_timestamp_ns >= stream->base.metrics.receive_start_timestamp_ns); + stream->base.metrics.receiving_duration_ns = + stream->base.metrics.receive_end_timestamp_ns - stream->base.metrics.receive_start_timestamp_ns; + if (stream->thread_data.content_length_received) { if (stream->base.request_method != AWS_HTTP_METHOD_HEAD && stream->base.client_data->response_status != AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED) { diff --git a/contrib/restricted/aws/aws-c-http/source/hpack.c b/contrib/restricted/aws/aws-c-http/source/hpack.c index ef3d0b3dcfa..83aee646782 100644 --- a/contrib/restricted/aws/aws-c-http/source/hpack.c +++ b/contrib/restricted/aws/aws-c-http/source/hpack.c @@ -103,7 +103,7 @@ void aws_hpack_static_table_init(struct aws_allocator *allocator) { } } -void aws_hpack_static_table_clean_up() { +void aws_hpack_static_table_clean_up(void) { aws_hash_table_clean_up(&s_static_header_reverse_lookup); aws_hash_table_clean_up(&s_static_header_reverse_lookup_name_only); } diff --git a/contrib/restricted/aws/aws-c-http/source/http.c b/contrib/restricted/aws/aws-c-http/source/http.c index 8a8fe92bd19..af61c51517a 100644 --- a/contrib/restricted/aws/aws-c-http/source/http.c +++ b/contrib/restricted/aws/aws-c-http/source/http.c @@ -148,6 +148,9 @@ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED, "Manual write failed because manual writes are already completed."), + AWS_DEFINE_ERROR_INFO_HTTP( + AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT, + "The server does not begin responding within the configuration after a request is fully sent."), }; /* clang-format on */ @@ -537,7 +540,7 @@ void aws_http_library_clean_up(void) { 
aws_io_library_clean_up(); } -void aws_http_fatal_assert_library_initialized() { +void aws_http_fatal_assert_library_initialized(void) { if (!s_library_initialized) { AWS_LOGF_FATAL( AWS_LS_HTTP_GENERAL, diff --git a/contrib/restricted/aws/aws-c-http/source/proxy_connection.c b/contrib/restricted/aws/aws-c-http/source/proxy_connection.c index e6cdb8a2460..3706c2fd543 100644 --- a/contrib/restricted/aws/aws-c-http/source/proxy_connection.c +++ b/contrib/restricted/aws/aws-c-http/source/proxy_connection.c @@ -39,7 +39,7 @@ AWS_STATIC_STRING_FROM_LITERAL(s_proxy_no_verify_peer_env_var, "AWS_PROXY_NO_VER #endif static struct aws_http_proxy_system_vtable s_default_vtable = { - .setup_client_tls = &aws_channel_setup_client_tls, + .aws_channel_setup_client_tls = &aws_channel_setup_client_tls, }; static struct aws_http_proxy_system_vtable *s_vtable = &s_default_vtable; @@ -162,6 +162,7 @@ struct aws_http_proxy_user_data *aws_http_proxy_user_data_new( user_data->original_channel_on_setup = on_channel_setup; user_data->original_channel_on_shutdown = on_channel_shutdown; user_data->requested_event_loop = options.requested_event_loop; + user_data->host_resolution_config = options.host_resolution_config; user_data->prior_knowledge_http2 = options.prior_knowledge_http2; /* one and only one setup callback must be valid */ @@ -465,7 +466,7 @@ static struct aws_http_message *s_build_h1_proxy_connect_request(struct aws_http } char port_str[20] = "\0"; - snprintf(port_str, sizeof(port_str), "%d", (int)user_data->original_port); + snprintf(port_str, sizeof(port_str), "%u", user_data->original_port); struct aws_byte_cursor port_cursor = aws_byte_cursor_from_c_str(port_str); if (aws_byte_buf_append(&path_buffer, &port_cursor)) { goto on_error; @@ -771,7 +772,7 @@ static void s_aws_http_on_stream_complete_tunnel_proxy( last_slot = last_slot->adj_right; } - if (s_vtable->setup_client_tls(last_slot, context->original_tls_options)) { + if (s_vtable->aws_channel_setup_client_tls(last_slot, 
context->original_tls_options)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(%p) Proxy connection failed to start TLS negotiation with error %d(%s)", @@ -1048,6 +1049,7 @@ static int s_aws_http_client_connect_via_forwarding_proxy(const struct aws_http_ options_copy.on_shutdown = s_aws_http_on_client_connection_http_proxy_shutdown_fn; options_copy.tls_options = options->proxy_options->tls_options; options_copy.requested_event_loop = options->requested_event_loop; + options_copy.host_resolution_config = options->host_resolution_config; options_copy.prior_knowledge_http2 = false; /* ToDo, expose the protocol specific config for proxy connection. */ int result = aws_http_client_connect_internal(&options_copy, s_proxy_http_request_transform); @@ -1084,6 +1086,7 @@ static int s_create_tunneling_connection(struct aws_http_proxy_user_data *user_d connect_options.http1_options = NULL; /* ToDo, expose the protocol specific config for proxy connection. */ connect_options.http2_options = NULL; /* ToDo */ connect_options.requested_event_loop = user_data->requested_event_loop; + connect_options.host_resolution_config = user_data->host_resolution_config; int result = aws_http_client_connect(&connect_options); if (result == AWS_OP_ERR) { @@ -1137,6 +1140,23 @@ static enum aws_http_proxy_connection_type s_determine_proxy_connection_type( } } +static struct aws_string *s_get_proxy_environment_value( + struct aws_allocator *allocator, + const struct aws_string *env_name) { + struct aws_string *out_string = NULL; + if (aws_get_environment_value(allocator, env_name, &out_string) == AWS_OP_SUCCESS && out_string != NULL && + out_string->len > 0) { + AWS_LOGF_DEBUG( + AWS_LS_HTTP_CONNECTION, + "%s environment found, %s", + aws_string_c_str(env_name), + aws_string_c_str(out_string)); + return out_string; + } + aws_string_destroy(out_string); + return NULL; +} + static int s_proxy_uri_init_from_env_variable( struct aws_allocator *allocator, const struct aws_http_client_connection_options 
*options, @@ -1145,25 +1165,19 @@ static int s_proxy_uri_init_from_env_variable( struct aws_string *proxy_uri_string = NULL; *found = false; if (options->tls_options) { - if (aws_get_environment_value(allocator, s_https_proxy_env_var_low, &proxy_uri_string) == AWS_OP_SUCCESS && - proxy_uri_string != NULL) { - AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "https_proxy environment found"); - } else if ( - aws_get_environment_value(allocator, s_https_proxy_env_var, &proxy_uri_string) == AWS_OP_SUCCESS && - proxy_uri_string != NULL) { - AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "HTTPS_PROXY environment found"); - } else { + proxy_uri_string = s_get_proxy_environment_value(allocator, s_https_proxy_env_var_low); + if (proxy_uri_string == NULL) { + proxy_uri_string = s_get_proxy_environment_value(allocator, s_https_proxy_env_var); + } + if (proxy_uri_string == NULL) { return AWS_OP_SUCCESS; } } else { - if (aws_get_environment_value(allocator, s_http_proxy_env_var_low, &proxy_uri_string) == AWS_OP_SUCCESS && - proxy_uri_string != NULL) { - AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "http_proxy environment found"); - } else if ( - aws_get_environment_value(allocator, s_http_proxy_env_var, &proxy_uri_string) == AWS_OP_SUCCESS && - proxy_uri_string != NULL) { - AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "HTTP_PROXY environment found"); - } else { + proxy_uri_string = s_get_proxy_environment_value(allocator, s_http_proxy_env_var_low); + if (proxy_uri_string == NULL) { + proxy_uri_string = s_get_proxy_environment_value(allocator, s_http_proxy_env_var); + } + if (proxy_uri_string == NULL) { return AWS_OP_SUCCESS; } } @@ -1213,7 +1227,7 @@ static int s_setup_proxy_tls_env_variable( AWS_LS_HTTP_CONNECTION, "Failed making default TLS context because of BYO_CRYPTO, set up the tls_options for proxy_env_settings to " "make it work."); - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); #else struct aws_tls_ctx *tls_ctx = NULL; struct aws_tls_ctx_options tls_ctx_options; @@ -1642,6 
+1656,7 @@ int aws_http_proxy_new_socket_channel( http_connection_options.on_setup = NULL; /* use channel callbacks, not http callbacks */ http_connection_options.on_shutdown = NULL; /* use channel callbacks, not http callbacks */ http_connection_options.requested_event_loop = channel_options->requested_event_loop; + http_connection_options.host_resolution_config = channel_options->host_resolution_override_config; if (s_aws_http_client_connect_via_tunneling_proxy( &http_connection_options, s_http_proxied_socket_channel_setup, s_http_proxied_socket_channel_shutdown)) { diff --git a/contrib/restricted/aws/aws-c-http/source/request_response.c b/contrib/restricted/aws/aws-c-http/source/request_response.c index c382a3a4d0e..dbd5214e762 100644 --- a/contrib/restricted/aws/aws-c-http/source/request_response.c +++ b/contrib/restricted/aws/aws-c-http/source/request_response.c @@ -867,6 +867,11 @@ int aws_http_message_get_header( return aws_http_headers_get_index(message->headers, index, out_header); } +AWS_FUTURE_T_POINTER_WITH_RELEASE_IMPLEMENTATION( + aws_future_http_message, + struct aws_http_message, + aws_http_message_release) + struct aws_http_stream *aws_http_connection_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { @@ -952,8 +957,8 @@ struct aws_http_message *aws_http2_message_new_from_http1( scheme_cursor.ptr); /** - * An intermediary that forwards a request over HTTP/2 MUST construct an ":authority" pseudo-header field using - * the authority information from the control data of the original request. (RFC=9113 8.3.1) + * An intermediary that forwards a request over HTTP/2 MUST construct an ":authority" pseudo-header field + * using the authority information from the control data of the original request. 
(RFC=9113 8.3.1) */ struct aws_byte_cursor host_value; AWS_ZERO_STRUCT(host_value); @@ -970,7 +975,8 @@ struct aws_http_message *aws_http2_message_new_from_http1( (int)host_value.len, host_value.ptr); } - /* TODO: If the host headers is missing, the target URI could be the other source of the authority information + /* TODO: If the host headers is missing, the target URI could be the other source of the authority + * information */ struct aws_byte_cursor path_cursor; @@ -1107,6 +1113,15 @@ int aws_http_stream_send_response(struct aws_http_stream *stream, struct aws_htt return stream->owning_connection->vtable->stream_send_response(stream, response); } +struct aws_http_stream *aws_http_stream_acquire(struct aws_http_stream *stream) { + AWS_PRECONDITION(stream); + + size_t prev_refcount = aws_atomic_fetch_add(&stream->refcount, 1); + AWS_LOGF_TRACE( + AWS_LS_HTTP_STREAM, "id=%p: Stream refcount acquired, %zu remaining.", (void *)stream, prev_refcount + 1); + return stream; +} + void aws_http_stream_release(struct aws_http_stream *stream) { if (!stream) { return; @@ -1186,6 +1201,10 @@ uint32_t aws_http_stream_get_id(const struct aws_http_stream *stream) { return stream->id; } +void aws_http_stream_cancel(struct aws_http_stream *stream, int error_code) { + stream->vtable->cancel(stream, error_code); +} + int aws_http2_stream_reset(struct aws_http_stream *http2_stream, uint32_t http2_error) { AWS_PRECONDITION(http2_stream); AWS_PRECONDITION(http2_stream->vtable); diff --git a/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c b/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c index b5225873059..6c66c8515c5 100644 --- a/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c +++ b/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c @@ -215,6 +215,7 @@ int aws_websocket_client_connect(const struct aws_websocket_client_connection_op http_options.on_setup = s_ws_bootstrap_on_http_setup; http_options.on_shutdown = 
s_ws_bootstrap_on_http_shutdown; http_options.requested_event_loop = options->requested_event_loop; + http_options.host_resolution_config = options->host_resolution_config; /* Infer port, if not explicitly specified in URI */ http_options.port = options->port; @@ -234,7 +235,7 @@ int aws_websocket_client_connect(const struct aws_websocket_client_connection_op /* Success! (so far) */ AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET_SETUP, - "id=%p: Websocket setup begun, connecting to " PRInSTR ":%" PRIu16 PRInSTR, + "id=%p: Websocket setup begun, connecting to " PRInSTR ":%" PRIu32 PRInSTR, (void *)ws_bootstrap, AWS_BYTE_CURSOR_PRI(options->host), options->port, diff --git a/contrib/restricted/aws/aws-c-http/ya.make b/contrib/restricted/aws/aws-c-http/ya.make index 641db7b8269..25cf51985e2 100644 --- a/contrib/restricted/aws/aws-c-http/ya.make +++ b/contrib/restricted/aws/aws-c-http/ya.make @@ -6,9 +6,9 @@ LICENSE(Apache-2.0) LICENSE_TEXTS(.yandex_meta/licenses.list.txt) -VERSION(0.7.6) +VERSION(0.8.1) -ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-http/archive/v0.7.6.tar.gz) +ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-http/archive/v0.8.1.tar.gz) PEERDIR( contrib/restricted/aws/aws-c-cal diff --git a/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report index 6f6fb2348b4..7ddf02f55cd 100644 --- a/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report +++ b/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report @@ -41,20 +41,24 @@ BELONGS ya.make Match type : COPYRIGHT Files with this license: NOTICE [2:2] + include/aws/io/async_stream.h [2:2] include/aws/io/channel.h [4:4] include/aws/io/channel_bootstrap.h [5:5] include/aws/io/event_loop.h [5:5] include/aws/io/exports.h [5:5] include/aws/io/file_utils.h [5:5] + include/aws/io/future.h [2:2] include/aws/io/host_resolver.h [4:4] include/aws/io/io.h [5:5] include/aws/io/logging.h [5:5] 
include/aws/io/message_pool.h [4:4] + include/aws/io/pem.h [5:5] include/aws/io/pipe.h [5:5] include/aws/io/pkcs11.h [4:4] include/aws/io/private/pem_utils.h [5:5] include/aws/io/private/pki_utils.h [4:4] include/aws/io/private/tls_channel_handler_shared.h [4:4] + include/aws/io/private/tracing.h [5:5] include/aws/io/retry_strategy.h [4:4] include/aws/io/shared_library.h [5:5] include/aws/io/socket.h [4:4] @@ -64,20 +68,21 @@ BELONGS ya.make include/aws/io/tls_channel_handler.h [4:4] include/aws/io/uri.h [4:4] source/alpn_handler.c [2:2] + source/async_stream.c [2:2] source/bsd/kqueue_event_loop.c [2:2] source/channel.c [2:2] source/channel_bootstrap.c [2:2] source/event_loop.c [2:2] source/exponential_backoff_retry_strategy.c [2:2] + source/future.c [2:2] source/host_resolver.c [2:2] source/io.c [2:2] source/linux/epoll_event_loop.c [2:2] source/message_pool.c [2:2] - source/pem_utils.c [2:2] + source/pem.c [2:2] source/pkcs11_lib.c [2:2] source/pkcs11_private.h [5:5] source/pkcs11_tls_op_handler.c [2:2] - source/pki_utils.c [2:2] source/posix/host_resolver.c [2:2] source/posix/pipe.c [2:2] source/posix/shared_library.c [2:2] @@ -85,32 +90,10 @@ BELONGS ya.make source/retry_strategy.c [2:2] source/s2n/s2n_tls_channel_handler.c [2:2] source/socket_channel_handler.c [2:2] + source/socket_shared.c [2:2] source/standard_retry_strategy.c [2:2] source/statistics.c [2:2] source/stream.c [2:2] source/tls_channel_handler.c [2:2] source/tls_channel_handler_shared.c [2:2] - -KEEP COPYRIGHT_SERVICE_LABEL 2e35409d3a27ad4f26ee063de186689a -BELONGS ya.make - License text: - Copyright (c) OASIS Open 2016. All Rights Reserved. - Scancode info: - Original SPDX id: COPYRIGHT_SERVICE_LABEL - Score : 100.00 - Match type : COPYRIGHT - Files with this license: - THIRD-PARTY-LICENSES.txt [3:3] - -KEEP COPYRIGHT_SERVICE_LABEL b3779e02b4352ffc4ebd94151bac83c8 -BELONGS ya.make - License text: - /* Copyright (c) OASIS Open 2016. 
All Rights Reserved./ - Scancode info: - Original SPDX id: COPYRIGHT_SERVICE_LABEL - Score : 100.00 - Match type : COPYRIGHT - Files with this license: - source/pkcs11/v2.40/pkcs11.h [1:1] - source/pkcs11/v2.40/pkcs11f.h [1:1] - source/pkcs11/v2.40/pkcs11t.h [1:1] + source/tracing.c [2:2] diff --git a/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report index ab86bf29a0b..fb9f392168f 100644 --- a/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report +++ b/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report @@ -31,18 +31,17 @@ # FILE_INCLUDE - include all file data into licenses text file # ======================= -KEEP Custom-Oasis-Pkcs11 2661c322534a1eac3a81a0e2c173c27e +KEEP Public-Domain 036b6be51c84bc54e82d1dc5d9f297f2 BELONGS ya.make - Note: matched license text is too long. Read it in the source files. + License text: + The source/pkcs11/v2.40/pkcs11.h header file is based on Public Domain content from https://github.com/latchset/pkcs11-headers Scancode info: - Original SPDX id: LicenseRef-scancode-oasis-ipr-policy-2014 - Score : 100.00 - Match type : REFERENCE - Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/oasis-ipr-policy-2014.LICENSE, https://www.oasis-open.org/policies-guidelines/ipr + Original SPDX id: LicenseRef-scancode-public-domain + Score : 99.00 + Match type : TEXT + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE Files with this license: - source/pkcs11/v2.40/pkcs11.h [2:5] - source/pkcs11/v2.40/pkcs11f.h [2:5] - source/pkcs11/v2.40/pkcs11t.h [2:5] + NOTICE [5:5] KEEP Apache-2.0 2b42edef8fa55315f34f2370b4715ca9 BELONGS ya.make @@ -56,18 +55,29 @@ FILE_INCLUDE NOTICE found in files: LICENSE at line 107, LICENSE at line 110, LI Files with this license: LICENSE [2:202] -KEEP 
Custom-Oasis-Pkcs11 34e6010c1f019f721ac79740e9f0a963 +KEEP Public-Domain 3049c5f7a0e142a3ee62eb098a0987ea BELONGS ya.make License text: - FITNESS FOR A PARTICULAR PURPOSE. OASIS AND ITS MEMBERS WILL NOT BE LIABLE FOR - ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE + /* This file is in the Public Domain. */ Scancode info: - Original SPDX id: LicenseRef-scancode-warranty-disclaimer - Score : 19.35 + Original SPDX id: LicenseRef-scancode-public-domain + Score : 100.00 Match type : TEXT - Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/warranty-disclaimer.LICENSE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + source/pkcs11/v2.40/pkcs11.h [1:1] + +KEEP Public-Domain 3483f96bb9a2043f114ebeb1ec77ead5 +BELONGS ya.make + License text: + /* This file is based on: https://github.com/latchset/pkcs11-headers/blob/main/public-domain/2.40/pkcs11.h */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE Files with this license: - THIRD-PARTY-LICENSES.txt [29:30] + source/pkcs11/v2.40/pkcs11.h [2:2] KEEP Apache-2.0 43f57e875cdc02e8385ff667f85d702e BELONGS ya.make @@ -117,20 +127,24 @@ BELONGS ya.make Match type : TAG Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0 Files with this license: + include/aws/io/async_stream.h [3:3] include/aws/io/channel.h [5:5] include/aws/io/channel_bootstrap.h [6:6] include/aws/io/event_loop.h [6:6] include/aws/io/exports.h [6:6] include/aws/io/file_utils.h [6:6] + include/aws/io/future.h [3:3] include/aws/io/host_resolver.h [5:5] include/aws/io/io.h 
[6:6] include/aws/io/logging.h [6:6] include/aws/io/message_pool.h [5:5] + include/aws/io/pem.h [6:6] include/aws/io/pipe.h [6:6] include/aws/io/pkcs11.h [5:5] include/aws/io/private/pem_utils.h [6:6] include/aws/io/private/pki_utils.h [5:5] include/aws/io/private/tls_channel_handler_shared.h [5:5] + include/aws/io/private/tracing.h [6:6] include/aws/io/retry_strategy.h [5:5] include/aws/io/shared_library.h [6:6] include/aws/io/socket.h [5:5] @@ -140,20 +154,21 @@ BELONGS ya.make include/aws/io/tls_channel_handler.h [5:5] include/aws/io/uri.h [5:5] source/alpn_handler.c [3:3] + source/async_stream.c [3:3] source/bsd/kqueue_event_loop.c [3:3] source/channel.c [3:3] source/channel_bootstrap.c [3:3] source/event_loop.c [3:3] source/exponential_backoff_retry_strategy.c [3:3] + source/future.c [3:3] source/host_resolver.c [3:3] source/io.c [3:3] source/linux/epoll_event_loop.c [3:3] source/message_pool.c [3:3] - source/pem_utils.c [3:3] + source/pem.c [3:3] source/pkcs11_lib.c [3:3] source/pkcs11_private.h [6:6] source/pkcs11_tls_op_handler.c [3:3] - source/pki_utils.c [3:3] source/posix/host_resolver.c [3:3] source/posix/pipe.c [3:3] source/posix/shared_library.c [3:3] @@ -161,22 +176,13 @@ BELONGS ya.make source/retry_strategy.c [3:3] source/s2n/s2n_tls_channel_handler.c [3:3] source/socket_channel_handler.c [3:3] + source/socket_shared.c [3:3] source/standard_retry_strategy.c [3:3] source/statistics.c [3:3] source/stream.c [3:3] source/tls_channel_handler.c [3:3] source/tls_channel_handler_shared.c [3:3] - -KEEP Custom-Oasis-Pkcs11 e561d19ebbe9cbf3e19e2ad68aca5ade -BELONGS ya.make - Note: matched license text is too long. Read it in the source files. 
- Scancode info: - Original SPDX id: LicenseRef-scancode-ecma-documentation - Score : 93.43 - Match type : TEXT - Links : http://www.ecma-international.org/publications/DISCLAIMER.pdf, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/ecma-documentation.LICENSE - Files with this license: - THIRD-PARTY-LICENSES.txt [10:29] + source/tracing.c [3:3] SKIP LicenseRef-scancode-generic-cla ee24fdc60600747c7d12c32055b0011d BELONGS ya.make diff --git a/contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt b/contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt index aa9cdb63ae9..b1edbd776f3 100644 --- a/contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt +++ b/contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt @@ -214,54 +214,26 @@ This library is licensed under the Apache 2.0 License. ====================Apache-2.0==================== SPDX-License-Identifier: Apache-2.0. -====================COPYRIGHT==================== -/* Copyright (c) OASIS Open 2016. All Rights Reserved./ - - -====================COPYRIGHT==================== -Copyright (c) OASIS Open 2016. All Rights Reserved. - ====================COPYRIGHT==================== Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -====================Custom-Oasis-Pkcs11==================== - * /Distributed under the terms of the OASIS IPR Policy, - * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY - * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. - +====================File: NOTICE==================== +AWS C Io +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +SPDX-License-Identifier: Apache-2.0. -====================Custom-Oasis-Pkcs11==================== -FITNESS FOR A PARTICULAR PURPOSE. 
OASIS AND ITS MEMBERS WILL NOT BE LIABLE FOR -ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE +The source/pkcs11/v2.40/pkcs11.h header file is based on Public Domain content from https://github.com/latchset/pkcs11-headers -====================Custom-Oasis-Pkcs11==================== -This document and translations of it may be copied and furnished to others, and -derivative works that comment on or otherwise explain it or assist in its -implementation may be prepared, copied, published, and distributed, in whole or -in part, without restriction of any kind, provided that the above copyright -notice and this section are included on all such copies and derivative works. -However, this document itself may not be modified in any way, including by -removing the copyright notice or references to OASIS, except as needed for the -purpose of developing any document or deliverable produced by an OASIS -Technical Committee (in which case the rules applicable to copyrights, as set -forth in the OASIS IPR Policy, must be followed) or as required to translate it -into languages other than English. +====================Public-Domain==================== +/* This file is based on: https://github.com/latchset/pkcs11-headers/blob/main/public-domain/2.40/pkcs11.h */ -The limited permissions granted above are perpetual and will not be revoked by -OASIS or its successors or assigns. -This document and the information contained herein is provided on an "AS IS" -basis and OASIS DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT -LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT -INFRINGE ANY OWNERSHIP RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR -FITNESS FOR A PARTICULAR PURPOSE. OASIS AND ITS MEMBERS WILL NOT BE LIABLE FOR +====================Public-Domain==================== +/* This file is in the Public Domain. */ -====================File: NOTICE==================== -AWS C Io -Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. -SPDX-License-Identifier: Apache-2.0. +====================Public-Domain==================== +The source/pkcs11/v2.40/pkcs11.h header file is based on Public Domain content from https://github.com/latchset/pkcs11-headers
\ No newline at end of file diff --git a/contrib/restricted/aws/aws-c-io/.yandex_meta/override.nix b/contrib/restricted/aws/aws-c-io/.yandex_meta/override.nix index 5278363e09d..e76cb45f953 100644 --- a/contrib/restricted/aws/aws-c-io/.yandex_meta/override.nix +++ b/contrib/restricted/aws/aws-c-io/.yandex_meta/override.nix @@ -1,10 +1,10 @@ pkgs: attrs: with pkgs; with attrs; rec { - version = "0.13.21"; + version = "0.14.7"; src = fetchFromGitHub { owner = "awslabs"; repo = "aws-c-io"; rev = "v${version}"; - hash = "sha256-YexLE75SJwzX+xZEXJWu1XGr+zSLnUYvYC0zWcOvU/0="; + hash = "sha256-Z4o1vv/8FWp3S7GfLDsV0a8ih+IdJIUC0DL4klOXjnw="; }; } diff --git a/contrib/restricted/aws/aws-c-io/NOTICE b/contrib/restricted/aws/aws-c-io/NOTICE index 37529d92f02..7939cd2397d 100644 --- a/contrib/restricted/aws/aws-c-io/NOTICE +++ b/contrib/restricted/aws/aws-c-io/NOTICE @@ -1,3 +1,5 @@ AWS C Io Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0. + +The source/pkcs11/v2.40/pkcs11.h header file is based on Public Domain content from https://github.com/latchset/pkcs11-headers diff --git a/contrib/restricted/aws/aws-c-io/README.md b/contrib/restricted/aws/aws-c-io/README.md index c480cb876a6..c922bb3068b 100644 --- a/contrib/restricted/aws/aws-c-io/README.md +++ b/contrib/restricted/aws/aws-c-io/README.md @@ -208,7 +208,7 @@ Sockets interact directly with the underlying io and are invoked directly by the Platform | Implementation --- | --- -Linux | Signal-to-noise (s2n) see github.com/awslabs/s2n +Linux | Signal-to-noise (s2n) see: https://github.com/aws/s2n-tls BSD Variants | s2n Apple Devices | Security Framework/ Secure Transport. See https://developer.apple.com/documentation/security/secure_transport Windows | Secure Channel. 
See https://msdn.microsoft.com/en-us/library/windows/desktop/aa380123(v=vs.85).aspx diff --git a/contrib/restricted/aws/aws-c-io/THIRD-PARTY-LICENSES.txt b/contrib/restricted/aws/aws-c-io/THIRD-PARTY-LICENSES.txt deleted file mode 100644 index ae301c689cf..00000000000 --- a/contrib/restricted/aws/aws-c-io/THIRD-PARTY-LICENSES.txt +++ /dev/null @@ -1,31 +0,0 @@ -** PKCS#11 Headers; version 2.40 -- http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/errata01/os/include/pkcs11-v2.40/ - -Copyright (c) OASIS Open 2016. All Rights Reserved. - -All capitalized terms in the following text have the meanings assigned to them -in the OASIS Intellectual Property Rights Policy (the "OASIS IPR Policy"). The -full Policy may be found at the OASIS website: -[http://www.oasis-open.org/policies-guidelines/ipr] - -This document and translations of it may be copied and furnished to others, and -derivative works that comment on or otherwise explain it or assist in its -implementation may be prepared, copied, published, and distributed, in whole or -in part, without restriction of any kind, provided that the above copyright -notice and this section are included on all such copies and derivative works. -However, this document itself may not be modified in any way, including by -removing the copyright notice or references to OASIS, except as needed for the -purpose of developing any document or deliverable produced by an OASIS -Technical Committee (in which case the rules applicable to copyrights, as set -forth in the OASIS IPR Policy, must be followed) or as required to translate it -into languages other than English. - -The limited permissions granted above are perpetual and will not be revoked by -OASIS or its successors or assigns. 
- -This document and the information contained herein is provided on an "AS IS" -basis and OASIS DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT -LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT -INFRINGE ANY OWNERSHIP RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR -FITNESS FOR A PARTICULAR PURPOSE. OASIS AND ITS MEMBERS WILL NOT BE LIABLE FOR -ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE -OF THIS DOCUMENT OR ANY PART THEREOF. diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/async_stream.h b/contrib/restricted/aws/aws-c-io/include/aws/io/async_stream.h new file mode 100644 index 00000000000..2e76bdd7ec4 --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/async_stream.h @@ -0,0 +1,117 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#ifndef AWS_IO_ASYNC_STREAM_H +#define AWS_IO_ASYNC_STREAM_H + +/** + * THIS IS AN EXPERIMENTAL AND UNSTABLE API + * TODO: logging + * TODO: modify API to return byte-bufs, instead of filling in the provided byte-buf? + * this would avoid a copy in the use-cases we know of, but it's more complex + * TODO: vtable acquire()/release()? + * TODO: protect against simultaneous reads? + * TODO: check results of vtable->read() (i.e. 0 byte reads not allowed)? + * this would require 1 or 2 additional allocations per read + */ + +#include <aws/io/io.h> + +#include <aws/common/ref_count.h> + +AWS_PUSH_SANE_WARNING_LEVEL + +struct aws_async_input_stream; +struct aws_byte_buf; +struct aws_future_bool; +struct aws_input_stream; + +struct aws_async_input_stream { + const struct aws_async_input_stream_vtable *vtable; + struct aws_allocator *alloc; + struct aws_ref_count ref_count; + void *impl; +}; + +struct aws_async_input_stream_vtable { + /** + * Destroy the stream, its refcount has reached 0. 
+ */ + void (*destroy)(struct aws_async_input_stream *stream); + + /** + * Read once into the buffer. + * Complete the read when at least 1 byte is read, the buffer is full, or EOF is reached. + * Do not resize the buffer (do not use "aws_byte_buf_xyz_dynamic()" functions) + * Do not assume that buffer len starts at 0. + * You may assume that read() won't be called again until the current one completes. + * You may assume that the buffer has some space available. + * Return a future, which will contain an error code if something went wrong, + * or a result bool indicating whether EOF has been reached. + */ + struct aws_future_bool *(*read)(struct aws_async_input_stream *stream, struct aws_byte_buf *dest); +}; + +AWS_EXTERN_C_BEGIN + +/** + * Initialize aws_async_input_stream "base class" + */ +AWS_IO_API +void aws_async_input_stream_init_base( + struct aws_async_input_stream *stream, + struct aws_allocator *alloc, + const struct aws_async_input_stream_vtable *vtable, + void *impl); + +/** + * Increment reference count. + * You may pass in NULL (has no effect). + * Returns whatever pointer was passed in. + */ +AWS_IO_API +struct aws_async_input_stream *aws_async_input_stream_acquire(struct aws_async_input_stream *stream); + +/** + * Decrement reference count. + * You may pass in NULL (has no effect). + * Always returns NULL. + */ +AWS_IO_API +struct aws_async_input_stream *aws_async_input_stream_release(struct aws_async_input_stream *stream); + +/** + * Read once from the async stream into the buffer. + * The read completes when at least 1 byte is read, the buffer is full, or EOF is reached. + * Depending on implementation, the read could complete at any time. + * It may complete synchronously. It may complete on another thread. + * Returns a future, which will contain an error code if something went wrong, + * or a result bool indicating whether EOF has been reached. + * + * WARNING: The buffer must have space available. 
+ * WARNING: Do not read again until the previous read is complete. + */ +AWS_IO_API +struct aws_future_bool *aws_async_input_stream_read(struct aws_async_input_stream *stream, struct aws_byte_buf *dest); + +/** + * Read repeatedly from the async stream until the buffer is full, or EOF is reached. + * Depending on implementation, this could complete at any time. + * It may complete synchronously. It may complete on another thread. + * Returns a future, which will contain an error code if something went wrong, + * or a result bool indicating whether EOF has been reached. + * + * WARNING: The buffer must have space available. + * WARNING: Do not read again until the previous read is complete. + */ +AWS_IO_API +struct aws_future_bool *aws_async_input_stream_read_to_fill( + struct aws_async_input_stream *stream, + struct aws_byte_buf *dest); + +AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_IO_ASYNC_STREAM_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/channel.h b/contrib/restricted/aws/aws-c-io/include/aws/io/channel.h index 50dc5cce26f..1f8401f9a3a 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/channel.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/channel.h @@ -10,6 +10,8 @@ #include <aws/common/statistics.h> #include <aws/common/task_scheduler.h> +AWS_PUSH_SANE_WARNING_LEVEL + enum aws_channel_direction { AWS_CHANNEL_DIR_READ, AWS_CHANNEL_DIR_WRITE, @@ -52,6 +54,8 @@ struct aws_channel_handler_vtable { /** * Called by the channel when a message is available for processing in the read direction. It is your * responsibility to call aws_mem_release(message->allocator, message); on message when you are finished with it. + * You must only call `aws_mem_release(message->allocator, message);` if the `process_read_message` + * returns AWS_OP_SUCCESS. In case of an error, you must not clean up the message and should just raise the error. * * Also keep in mind that your slot's internal window has been decremented. 
You'll want to call * aws_channel_slot_increment_read_window() at some point in the future if you want to keep receiving data. @@ -63,6 +67,8 @@ struct aws_channel_handler_vtable { /** * Called by the channel when a message is available for processing in the write direction. It is your * responsibility to call aws_mem_release(message->allocator, message); on message when you are finished with it. + * You must only call `aws_mem_release(message->allocator, message);` if the `process_read_message` + * returns AWS_OP_SUCCESS. In case of an error, you must not clean up the message and should just raise the error. */ int (*process_write_message)( struct aws_channel_handler *handler, @@ -504,5 +510,6 @@ AWS_IO_API int aws_channel_trigger_read(struct aws_channel *channel); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_CHANNEL_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h b/contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h index ae97ba6b4ed..e65794756ff 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h @@ -9,6 +9,8 @@ #include <aws/io/channel.h> #include <aws/io/host_resolver.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_client_bootstrap; struct aws_socket; struct aws_socket_options; @@ -176,7 +178,7 @@ struct aws_server_bootstrap { struct aws_socket_channel_bootstrap_options { struct aws_client_bootstrap *bootstrap; const char *host_name; - uint16_t port; + uint32_t port; const struct aws_socket_options *socket_options; const struct aws_tls_connection_options *tls_options; aws_client_bootstrap_on_channel_event_fn *creation_callback; @@ -206,7 +208,7 @@ struct aws_socket_channel_bootstrap_options { struct aws_server_socket_channel_bootstrap_options { struct aws_server_bootstrap *bootstrap; const char *host_name; - uint16_t port; + uint32_t port; const struct aws_socket_options *socket_options; const 
struct aws_tls_connection_options *tls_options; aws_server_bootstrap_on_accept_channel_setup_fn *incoming_callback; @@ -305,5 +307,6 @@ AWS_IO_API void aws_server_bootstrap_destroy_socket_listener( struct aws_socket *listener); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_CHANNEL_BOOTSTRAP_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h b/contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h index 32d6268697e..a3b552d6ea2 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h @@ -12,6 +12,8 @@ #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + enum aws_io_event_type { AWS_IO_EVENT_TYPE_READABLE = 1, AWS_IO_EVENT_TYPE_WRITABLE = 2, @@ -470,5 +472,6 @@ AWS_IO_API struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_EVENT_LOOP_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/future.h b/contrib/restricted/aws/aws-c-io/include/aws/io/future.h new file mode 100644 index 00000000000..a32969289b4 --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/future.h @@ -0,0 +1,627 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#ifndef AWS_IO_FUTURE_H +#define AWS_IO_FUTURE_H + +/* + +// THIS IS AN EXPERIMENTAL AND UNSTABLE API +// +// An aws_future is used to deliver the result of an asynchronous function. +// +// When an async function is called, it creates a future and returns it to the caller. +// When the async work is finished, it completes the future by setting an error or result value. +// The caller waits until the future is done, checks for error, and then gets +// the result if everything was OK. Typically, the caller waits by registering +// a callback that the future invokes when it's done. 
+// +// If result type T has a "destructor" (clean_up(), destroy(), or release() function), +// then the future has set_result_by_move() and get_result_by_move() functions +// that explicitly transfer ownership to and from the future. +// If the future dies, and still "owns" the resource, it calls the destructor. +// If T has no destructor, then the future has set_result() and get_result() +// functions that simply copy T by value. +// +// Macros are used to define a type-safe API for each result type T, +// similar to C++ templates. This makes the API hard to browse, so functions +// are documented in comments below. The result setter/getter functions +// are mildly different based on T's destructor type, and are documented later. + +// +// --- API (common to all aws_future<T>) --- +// + +// Create a new future, with refcount of 1. +struct aws_future_T *aws_future_T_new(struct aws_allocator *alloc); + +// Increment the refcount. +// You can pass NULL (has no effect). +// Returns the same pointer that was passed in. +struct aws_future_T *aws_future_T_acquire(struct aws_future_T *future); + +// Decrement the refcount. +// You can pass NULL (has no effect). +// Always returns NULL. +struct aws_future_T *aws_future_T_release(struct aws_future_T *future); + +// Set future as done, with an error_code. +// If the future is already done this call is ignored. +void aws_future_T_set_error(struct aws_future_T *future, int error_code); + +// Return whether the future is done. +bool aws_future_T_is_done(const struct aws_future_T *future); + +// Get the error-code of a completed future. +// If 0 is returned, then the future completed successfully, +// you may now get the result. +// +// WARNING: You MUST NOT call this until the future is done. +int aws_future_T_get_error(const struct aws_future_T *future); + +// Register callback to be invoked when the future completes. +// +// If the future is already done, the callback runs synchronously on the calling thread. 
+// If the future isn't done yet, the callback is registered, and it +// will run synchronously on whatever thread completes the future. +// +// WARNING: You MUST NOT register more than one callback. +void aws_future_T_register_callback(struct aws_future_T *future, aws_future_callback_fn *on_done, void *user_data); + +// If the future isn't done yet, then register the completion callback. +// +// Returns true if the callback was registered, +// or false if the future is already done. +// +// Use this when you can't risk the callback running synchronously. +// For example: If you're calling an async function repeatedly, +// and synchronous completion could lead to stack overflow due to recursion. +// Or if you are holding a non-recursive mutex, and the callback also +// needs the mutex, and an immediate callback would deadlock. +// +// WARNING: If a callback is registered, you MUST NOT call this again until +// the callback has been invoked. +bool aws_future_T_register_callback_if_not_done( + struct aws_future_T *future, + aws_future_callback_fn *on_done, + void *user_data); + +// Register completion callback to run async on an event-loop thread. +// +// When the future completes, the callback is scheduled to run as an event-loop task. +// +// Use this when you want the callback to run on the event-loop's thread, +// or to ensure the callback runs async even if the future completed synchronously. +// +// WARNING: You MUST NOT register more than one callback. +void aws_future_T_register_event_loop_callback( + struct aws_future_T *future, + struct aws_event_loop *event_loop, + aws_future_callback_fn *on_done, + void *user_data); + +// Register completion callback to run async on an aws_channel's thread. +// +// When the future completes, the callback is scheduled to run as a channel task. +// +// Use this when you want the callback to run on the channel's thread, +// or to ensure the callback runs async even if the future completed synchronously. 
+// +// WARNING: You MUST NOT register more than one callback. +void aws_future_T_register_channel_callback( + struct aws_future_T *future, + struct aws_channel *channel, + aws_future_callback_fn *on_done, + void *user_data); + +// Wait (up to timeout_ns) for future to complete. +// Returns true if future completes in this time. +// This blocks the current thread, and is probably only useful for tests and sample programs. +bool aws_future_T_wait(struct aws_future_T *future, uint64_t timeout_ns); + +// +// --- Defining new aws_future types --- +// TODO UPDATE THESE DOCS +// To define new types of aws_future<T>, add the appropriate macro to the appropriate header. +// The macros are: +// +// AWS_DECLARE_FUTURE_T_BY_VALUE(FUTURE, T) +// For T stored by value, with no destructor. +// Use with types like bool, size_t, etc +// +// AWS_DECLARE_FUTURE_T_BY_VALUE_WITH_CLEAN_UP(FUTURE, T, CLEAN_UP_FN) +// For T stored by value, with destructor like: void aws_T_clean_up(T*) +// Use with types like `struct aws_byte_buf` +// +// AWS_DECLARE_FUTURE_T_POINTER_WITH_DESTROY(FUTURE, T, DESTROY_FN) +// For T stored by pointer, with destructor like: void aws_T_destroy(T*) +// Use with types like `struct aws_string *` +// +// AWS_DECLARE_FUTURE_T_POINTER_WITH_RELEASE(FUTURE, T, RELEASE_FN) +// For T stored by pointer, with destructor like: T* aws_T_release(T*) +// Use with types like `struct aws_http_message *` +// Note: if T's release() function doesn't return a pointer, use _WITH_DESTROY instead of _WITH_RELEASE. +// +// This file declares several common types: aws_future<size_t>, aws_future<void>, etc. +// But new future types should be declared in the header where that type's API is declared. +// For example: AWS_DECLARE_FUTURE_T_POINTER_WITH_RELEASE(aws_future_http_message, struct aws_http_message) +// would go in: aws-c-http/include/aws/http/request_response.h +// +// The APIs generated by these macros are identical except for the "setter" and "getter" functions. 
+ +// +// --- Design (if you're curious) --- +// +// This class was developed to give the user more control over how the completion +// callback is invoked. In the past, we passed completion callbacks to the async +// function. But this could lead to issues when an async function "sometimes" +// completed synchronously and "sometimes" completed async. The async function +// would need to stress about how to schedule the callback so it was always async, +// or more typically just invoke it whenever and leave the caller to figure it out. +// +// This class is also an experiment with "templates/generics in C". +// In order to make the class type-safe, we use macros to define a unique +// API for each result type T we need to store in a future. +// If we refer to aws_future<struct aws_byte_buf>, we mean a struct named +// aws_future_byte_buf, which stores an aws_byte_buf by value. +// This could lead to code bloat, but the type-safety seems worth it. +// +// future is defined in aws-c-io, instead of aws-c-common, so it can +// easily integrate with aws_event_loop and aws_channel. +// +// It's legal to call set_error() or set_result() multiple times. +// If the future is already done, it ignores the call. +// If result T has a destructor, the new result is immediately freed instead of saved. +// This design lets us deal with ambiguity where it's not 100% certain whether a handoff occurred. +// For example: if we call from C->Java and an exception is thrown, +// it's not clear whether Java got the handoff. In this case, we can safely +// call set_error(), completing the future if necessary, +// or being ignored if the future was already done. 
+ +*/ + +#include <aws/io/io.h> + +AWS_PUSH_SANE_WARNING_LEVEL + +struct aws_channel; +struct aws_event_loop; +struct aws_future_impl; + +/** Completion callback for aws_future<T> */ +typedef void(aws_future_callback_fn)(void *user_data); + +typedef void(aws_future_impl_result_clean_up_fn)(void *result_addr); +typedef void(aws_future_impl_result_destroy_fn)(void *result); +typedef void *(aws_future_impl_result_release_fn)(void *result); + +AWS_EXTERN_C_BEGIN + +AWS_IO_API +struct aws_future_impl *aws_future_impl_new_by_value(struct aws_allocator *alloc, size_t sizeof_result); + +AWS_IO_API +struct aws_future_impl *aws_future_impl_new_by_value_with_clean_up( + struct aws_allocator *alloc, + size_t sizeof_result, + aws_future_impl_result_clean_up_fn *result_clean_up); + +AWS_IO_API +struct aws_future_impl *aws_future_impl_new_pointer(struct aws_allocator *alloc); + +AWS_IO_API +struct aws_future_impl *aws_future_impl_new_pointer_with_destroy( + struct aws_allocator *alloc, + aws_future_impl_result_destroy_fn *result_destroy); + +AWS_IO_API +struct aws_future_impl *aws_future_impl_new_pointer_with_release( + struct aws_allocator *alloc, + aws_future_impl_result_release_fn *result_release); + +AWS_IO_API +struct aws_future_impl *aws_future_impl_release(struct aws_future_impl *promise); + +AWS_IO_API +struct aws_future_impl *aws_future_impl_acquire(struct aws_future_impl *promise); + +AWS_IO_API +void aws_future_impl_set_error(struct aws_future_impl *promise, int error_code); + +AWS_IO_API +void aws_future_impl_set_result_by_move(struct aws_future_impl *promise, void *src_address); + +AWS_IO_API +bool aws_future_impl_is_done(const struct aws_future_impl *future); + +AWS_IO_API +void aws_future_impl_register_callback( + struct aws_future_impl *future, + aws_future_callback_fn *on_done, + void *user_data); + +AWS_IO_API +bool aws_future_impl_register_callback_if_not_done( + struct aws_future_impl *future, + aws_future_callback_fn *on_done, + void *user_data); + 
+AWS_IO_API +void aws_future_impl_register_event_loop_callback( + struct aws_future_impl *future, + struct aws_event_loop *event_loop, + aws_future_callback_fn *on_done, + void *user_data); + +AWS_IO_API +void aws_future_impl_register_channel_callback( + struct aws_future_impl *future, + struct aws_channel *channel, + aws_future_callback_fn *on_done, + void *user_data); + +AWS_IO_API +bool aws_future_impl_wait(const struct aws_future_impl *future, uint64_t timeout_ns); + +AWS_IO_API +int aws_future_impl_get_error(const struct aws_future_impl *future); + +AWS_IO_API +void *aws_future_impl_get_result_address(const struct aws_future_impl *future); + +AWS_IO_API +void aws_future_impl_get_result_by_move(struct aws_future_impl *future, void *dst_address); + +/* Common beginning to all aws_future<T> declarations */ +#define AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) struct FUTURE; + +/* Common beginning to all aws_future<T> implementations */ +#define AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) + +/* Common end to all aws_future<T> declarations */ +#define AWS_FUTURE_T_DECLARATION_END(FUTURE, API) \ + API struct FUTURE *FUTURE##_acquire(struct FUTURE *future); \ + API struct FUTURE *FUTURE##_release(struct FUTURE *future); \ + API void FUTURE##_set_error(struct FUTURE *future, int error_code); \ + API bool FUTURE##_is_done(const struct FUTURE *future); \ + API int FUTURE##_get_error(const struct FUTURE *future); \ + API void FUTURE##_register_callback(struct FUTURE *future, aws_future_callback_fn *on_done, void *user_data); \ + API bool FUTURE##_register_callback_if_not_done( \ + struct FUTURE *future, aws_future_callback_fn *on_done, void *user_data); \ + API void FUTURE##_register_event_loop_callback( \ + struct FUTURE *future, struct aws_event_loop *event_loop, aws_future_callback_fn *on_done, void *user_data); \ + API void FUTURE##_register_channel_callback( \ + struct FUTURE *future, struct aws_channel *channel, aws_future_callback_fn *on_done, void *user_data); \ + 
API bool FUTURE##_wait(struct FUTURE *future, uint64_t timeout_ns); + +/* Common end to all aws_future<T> implementations */ +#define AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) \ + struct FUTURE *FUTURE##_acquire(struct FUTURE *future) { \ + return (struct FUTURE *)aws_future_impl_acquire((struct aws_future_impl *)future); \ + } \ + \ + struct FUTURE *FUTURE##_release(struct FUTURE *future) { \ + return (struct FUTURE *)aws_future_impl_release((struct aws_future_impl *)future); \ + } \ + \ + void FUTURE##_set_error(struct FUTURE *future, int error_code) { \ + aws_future_impl_set_error((struct aws_future_impl *)future, error_code); \ + } \ + \ + bool FUTURE##_is_done(const struct FUTURE *future) { \ + return aws_future_impl_is_done((const struct aws_future_impl *)future); \ + } \ + \ + int FUTURE##_get_error(const struct FUTURE *future) { \ + return aws_future_impl_get_error((const struct aws_future_impl *)future); \ + } \ + \ + void FUTURE##_register_callback(struct FUTURE *future, aws_future_callback_fn *on_done, void *user_data) { \ + aws_future_impl_register_callback((struct aws_future_impl *)future, on_done, user_data); \ + } \ + \ + bool FUTURE##_register_callback_if_not_done( \ + struct FUTURE *future, aws_future_callback_fn *on_done, void *user_data) { \ + \ + return aws_future_impl_register_callback_if_not_done((struct aws_future_impl *)future, on_done, user_data); \ + } \ + \ + void FUTURE##_register_event_loop_callback( \ + struct FUTURE *future, struct aws_event_loop *event_loop, aws_future_callback_fn *on_done, void *user_data) { \ + \ + aws_future_impl_register_event_loop_callback( \ + (struct aws_future_impl *)future, event_loop, on_done, user_data); \ + } \ + \ + void FUTURE##_register_channel_callback( \ + struct FUTURE *future, struct aws_channel *channel, aws_future_callback_fn *on_done, void *user_data) { \ + \ + aws_future_impl_register_channel_callback((struct aws_future_impl *)future, channel, on_done, user_data); \ + } \ + \ + bool 
FUTURE##_wait(struct FUTURE *future, uint64_t timeout_ns) { \ + return aws_future_impl_wait((struct aws_future_impl *)future, timeout_ns); \ + } + +/** + * Declare a future that holds a simple T by value, that needs no destructor. + * Use with types like bool, size_t, etc. + * + * See top of future.h for most API docs. + * The result setters and getters are: + +// Set the result. +// +// If the future is already done this call is ignored. +void aws_future_T_set_result(const struct aws_future_T *future, T result); + +// Get the result of a completed future. +// +// WARNING: You MUST NOT call this until the future is done. +// WARNING: You MUST NOT call this unless get_error() returned 0. +T aws_future_T_get_result(const struct aws_future_T *future); + +*/ +#define AWS_FUTURE_T_BY_VALUE_DECLARATION(FUTURE, T, API) \ + AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ + API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ + API void FUTURE##_set_result(struct FUTURE *future, T result); \ + API T FUTURE##_get_result(const struct FUTURE *future); \ + AWS_FUTURE_T_DECLARATION_END(FUTURE, API) + +#define AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(FUTURE, T) \ + AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ + struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ + return (struct FUTURE *)aws_future_impl_new_by_value(alloc, sizeof(T)); \ + } \ + \ + void FUTURE##_set_result(struct FUTURE *future, T result) { \ + aws_future_impl_set_result_by_move((struct aws_future_impl *)future, &result); \ + } \ + \ + T FUTURE##_get_result(const struct FUTURE *future) { \ + return *(T *)aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ + } \ + AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) + +/** + * Declares a future that holds T by value, with destructor like: void aws_T_clean_up(T*) + * Use with types like aws_byte_buf. + * + * See top of future.h for most API docs. + * The result setters and getters are: + +// Set the result, transferring ownership. 
+// +// The memory at `value_address` is memcpy'd into the future, +// and then zeroed out to help prevent accidental reuse. +// It is safe to call this multiple times. If the future is already done, +// the new result is destroyed instead of saved. +void aws_future_T_set_result_by_move(struct aws_future_T *future, T *value_address); + +// Get the result, transferring ownership. +// +// WARNING: You MUST NOT call this until the future is done. +// WARNING: You MUST NOT call this unless get_error() returned 0. +// WARNING: You MUST NOT call this multiple times. +T aws_future_T_get_result_by_move(struct aws_future_T *future); + +// Get the result, without transferring ownership. +// +// WARNING: You MUST NOT call this until the future is done. +// WARNING: You MUST NOT call this unless get_error() returned 0. +// WARNING: You MUST NOT call this multiple times. +T* aws_future_T_peek_result(const struct aws_future_T *future); + + */ +#define AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP_DECLARATION(FUTURE, T, API) \ + AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ + API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ + API void FUTURE##_set_result_by_move(struct FUTURE *future, T *value_address); \ + API T *FUTURE##_peek_result(const struct FUTURE *future); \ + API T FUTURE##_get_result_by_move(struct FUTURE *future); \ + AWS_FUTURE_T_DECLARATION_END(FUTURE, API) + +#define AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP_IMPLEMENTATION(FUTURE, T, CLEAN_UP_FN) \ + AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ + \ + struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ + void (*clean_up_fn)(T *) = CLEAN_UP_FN; /* check clean_up() function signature */ \ + return (struct FUTURE *)aws_future_impl_new_by_value_with_clean_up( \ + alloc, sizeof(T), (aws_future_impl_result_clean_up_fn)clean_up_fn); \ + } \ + \ + void FUTURE##_set_result_by_move(struct FUTURE *future, T *value_address) { \ + aws_future_impl_set_result_by_move((struct aws_future_impl *)future, value_address); \ + } \ 
+ \ + T *FUTURE##_peek_result(const struct FUTURE *future) { \ + return aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ + } \ + \ + T FUTURE##_get_result_by_move(struct FUTURE *future) { \ + T value; \ + aws_future_impl_get_result_by_move((struct aws_future_impl *)future, &value); \ + return value; \ + } \ + \ + AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) + +/** + * Declares a future that holds T*, with no destructor. + */ +#define AWS_FUTURE_T_POINTER_DECLARATION(FUTURE, T, API) \ + AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ + API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ + API void FUTURE##_set_result(struct FUTURE *future, T *result); \ + API T *FUTURE##_get_result(const struct FUTURE *future); \ + AWS_FUTURE_T_DECLARATION_END(FUTURE, API) + +#define AWS_FUTURE_T_POINTER_IMPLEMENTATION(FUTURE, T) \ + AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ + \ + struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ + return (struct FUTURE *)aws_future_impl_new_pointer(alloc); \ + } \ + \ + void FUTURE##_set_result(struct FUTURE *future, T *result) { \ + aws_future_impl_set_result_by_move((struct aws_future_impl *)future, &result); \ + } \ + \ + T *FUTURE##_get_result(const struct FUTURE *future) { \ + return *(T **)aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ + } \ + \ + AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) + +/** + * Declares a future that holds T*, with destructor like: void aws_T_destroy(T*) + * Use with types like aws_string. + * + * See top of future.h for most API docs. + * The result setters and getters are: + +// Set the result, transferring ownership. +// +// The value at `pointer_address` is copied into the future, +// and then set NULL to prevent accidental reuse. +// If the future is already done, this new result is destroyed instead of saved. 
+void aws_future_T_set_result_by_move(struct aws_future_T *future, T **pointer_address); + +// Get the result, transferring ownership. +// +// WARNING: You MUST NOT call this until the future is done. +// WARNING: You MUST NOT call this unless get_error() returned 0. +// WARNING: You MUST NOT call this multiple times. +T* aws_future_T_get_result_by_move(struct aws_future_T *future); + +// Get the result, without transferring ownership. +// +// WARNING: You MUST NOT call this until the future is done. +// WARNING: You MUST NOT call this unless get_error() returned 0. +// WARNING: You MUST NOT call this multiple times. +T* aws_future_T_peek_result(const struct aws_future_T *future); + + */ +#define AWS_FUTURE_T_POINTER_WITH_DESTROY_DECLARATION(FUTURE, T, API) \ + AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ + API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ + API void FUTURE##_set_result_by_move(struct FUTURE *future, T **pointer_address); \ + API T *FUTURE##_get_result_by_move(struct FUTURE *future); \ + API T *FUTURE##_peek_result(const struct FUTURE *future); \ + AWS_FUTURE_T_DECLARATION_END(FUTURE, API) + +#define AWS_FUTURE_T_POINTER_WITH_DESTROY_IMPLEMENTATION(FUTURE, T, DESTROY_FN) \ + AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ + \ + struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ + void (*destroy_fn)(T *) = DESTROY_FN; /* check destroy() function signature */ \ + return (struct FUTURE *)aws_future_impl_new_pointer_with_destroy( \ + alloc, (aws_future_impl_result_destroy_fn *)destroy_fn); \ + } \ + \ + void FUTURE##_set_result_by_move(struct FUTURE *future, T **pointer_address) { \ + aws_future_impl_set_result_by_move((struct aws_future_impl *)future, pointer_address); \ + } \ + \ + T *FUTURE##_get_result_by_move(struct FUTURE *future) { \ + T *pointer; \ + aws_future_impl_get_result_by_move((struct aws_future_impl *)future, &pointer); \ + return pointer; \ + } \ + \ + T *FUTURE##_peek_result(const struct FUTURE *future) { \ + 
return *(T **)aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ + } \ + \ + AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) + +/** + * Declares a future that holds T*, with destructor like: T* aws_T_release(T*) + * Use with types like aws_http_message + * + * See top of future.h for most API docs. + * The result setters and getters are: + +// Set the result, transferring ownership. +// +// The value at `pointer_address` is copied into the future, +// and then set NULL to prevent accidental reuse. +// If the future is already done, this new result is destroyed instead of saved. +void aws_future_T_set_result_by_move(struct aws_future_T *future, T **pointer_address); + +// Get the result, transferring ownership. +// +// WARNING: You MUST NOT call this until the future is done. +// WARNING: You MUST NOT call this unless get_error() returned 0. +// WARNING: You MUST NOT call this multiple times. +T* aws_future_T_get_result_by_move(struct aws_future_T *future); + +// Get the result, without transferring ownership. +// +// WARNING: You MUST NOT call this until the future is done. +// WARNING: You MUST NOT call this unless get_error() returned 0. +// WARNING: You MUST NOT call this multiple times. 
+T* aws_future_T_peek_result(const struct aws_future_T *future); + + */ +#define AWS_FUTURE_T_POINTER_WITH_RELEASE_DECLARATION(FUTURE, T, API) \ + AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ + API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ + API void FUTURE##_set_result_by_move(struct FUTURE *future, T **pointer_address); \ + API T *FUTURE##_get_result_by_move(struct FUTURE *future); \ + API T *FUTURE##_peek_result(const struct FUTURE *future); \ + AWS_FUTURE_T_DECLARATION_END(FUTURE, API) + +#define AWS_FUTURE_T_POINTER_WITH_RELEASE_IMPLEMENTATION(FUTURE, T, RELEASE_FN) \ + AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ + \ + struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ + T *(*release_fn)(T *) = RELEASE_FN; /* check release() function signature */ \ + return (struct FUTURE *)aws_future_impl_new_pointer_with_release( \ + alloc, (aws_future_impl_result_release_fn *)release_fn); \ + } \ + \ + void FUTURE##_set_result_by_move(struct FUTURE *future, T **pointer_address) { \ + aws_future_impl_set_result_by_move((struct aws_future_impl *)future, pointer_address); \ + } \ + \ + T *FUTURE##_get_result_by_move(struct FUTURE *future) { \ + T *pointer; \ + aws_future_impl_get_result_by_move((struct aws_future_impl *)future, &pointer); \ + return pointer; \ + } \ + \ + T *FUTURE##_peek_result(const struct FUTURE *future) { \ + return *(T **)aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ + } \ + \ + AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) + +/** + * aws_future<size_t> + */ +AWS_FUTURE_T_BY_VALUE_DECLARATION(aws_future_size, size_t, AWS_IO_API) + +/** + * aws_future<bool> + */ +AWS_FUTURE_T_BY_VALUE_DECLARATION(aws_future_bool, bool, AWS_IO_API) + +/** + * aws_future<void> + */ +AWS_FUTURE_T_DECLARATION_BEGIN(aws_future_void, AWS_IO_API) + +AWS_IO_API struct aws_future_void *aws_future_void_new(struct aws_allocator *alloc); + +AWS_IO_API void aws_future_void_set_result(struct aws_future_void *future); + 
+AWS_FUTURE_T_DECLARATION_END(aws_future_void, AWS_IO_API) + +AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_IO_FUTURE_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h b/contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h index 6be0245e994..8b482422462 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h @@ -8,6 +8,8 @@ #include <aws/common/ref_count.h> #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_event_loop_group; enum aws_address_record_type { @@ -272,5 +274,6 @@ AWS_IO_API size_t aws_host_resolver_get_host_address_count( AWS_IO_API struct aws_host_resolution_config aws_host_resolver_init_default_resolution_config(void); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_HOST_RESOLVER_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/io.h b/contrib/restricted/aws/aws-c-io/include/aws/io/io.h index 2dfd392dd47..011e1a779f7 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/io.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/io.h @@ -10,6 +10,8 @@ #include <aws/common/linked_list.h> #include <aws/io/exports.h> +AWS_PUSH_SANE_WARNING_LEVEL + #define AWS_C_IO_PACKAGE_ID 1 struct aws_io_handle { @@ -248,6 +250,10 @@ enum aws_io_errors { AWS_IO_STREAM_SEEK_UNSUPPORTED, AWS_IO_STREAM_GET_LENGTH_UNSUPPORTED, + AWS_IO_TLS_ERROR_READ_FAILURE, + + AWS_ERROR_PEM_MALFORMED, + AWS_IO_ERROR_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_IO_PACKAGE_ID), AWS_IO_INVALID_FILE_HANDLE = AWS_ERROR_INVALID_FILE_HANDLE, }; @@ -271,5 +277,6 @@ AWS_IO_API void aws_io_fatal_assert_library_initialized(void); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_IO_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/logging.h b/contrib/restricted/aws/aws-c-io/include/aws/io/logging.h index a95e1d4acde..a3bbee2fa6e 100644 --- 
a/contrib/restricted/aws/aws-c-io/include/aws/io/logging.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/logging.h @@ -10,6 +10,8 @@ #include <aws/common/logging.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_log_channel; struct aws_log_formatter; struct aws_log_writer; @@ -30,7 +32,9 @@ enum aws_io_log_subject { AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, AWS_LS_IO_STANDARD_RETRY_STRATEGY, AWS_LS_IO_PKCS11, + AWS_LS_IO_PEM, AWS_IO_LS_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_IO_PACKAGE_ID) }; +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_LOGGING_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/message_pool.h b/contrib/restricted/aws/aws-c-io/include/aws/io/message_pool.h index 4b1e809c90c..738627a3997 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/message_pool.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/message_pool.h @@ -7,6 +7,8 @@ #include <aws/common/array_list.h> #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_memory_pool { struct aws_allocator *alloc; struct aws_array_list stack; @@ -82,5 +84,6 @@ AWS_IO_API void aws_message_pool_release(struct aws_message_pool *msg_pool, struct aws_io_message *message); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_MESSAGE_POOL_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/pem.h b/contrib/restricted/aws/aws-c-io/include/aws/io/pem.h new file mode 100644 index 00000000000..0a21cc0c6de --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/pem.h @@ -0,0 +1,99 @@ +#ifndef AWS_IO_PEM_H +#define AWS_IO_PEM_H + +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include <aws/io/io.h> + +AWS_EXTERN_C_BEGIN + +/* + * Naming follows OpenSSL convention for PEM types. + * Refer to comment after each enum value for the type string it represents. 
+ */ +enum aws_pem_object_type { + AWS_PEM_TYPE_UNKNOWN = 0, + AWS_PEM_TYPE_X509_OLD, /* X509 CERTIFICATE */ + AWS_PEM_TYPE_X509, /* CERTIFICATE */ + AWS_PEM_TYPE_X509_TRUSTED, /* TRUSTED CERTIFICATE */ + AWS_PEM_TYPE_X509_REQ_OLD, /* NEW CERTIFICATE REQUEST */ + AWS_PEM_TYPE_X509_REQ, /* CERTIFICATE REQUEST */ + AWS_PEM_TYPE_X509_CRL, /* X509 CRL */ + AWS_PEM_TYPE_EVP_PKEY, /* ANY PRIVATE KEY */ + AWS_PEM_TYPE_PUBLIC_PKCS8, /* PUBLIC KEY */ + AWS_PEM_TYPE_PRIVATE_RSA_PKCS1, /* RSA PRIVATE KEY */ + AWS_PEM_TYPE_PUBLIC_RSA_PKCS1, /* RSA PUBLIC KEY */ + AWS_PEM_TYPE_PRIVATE_DSA_PKCS1, /* RSA PRIVATE KEY */ + AWS_PEM_TYPE_PUBLIC_DSA_PKCS1, /* RSA PUBLIC KEY */ + AWS_PEM_TYPE_PKCS7, /* PKCS7 */ + AWS_PEM_TYPE_PKCS7_SIGNED_DATA, /* PKCS #7 SIGNED DATA */ + AWS_PEM_TYPE_PRIVATE_PKCS8_ENCRYPTED, /* ENCRYPTED PRIVATE KEY */ + AWS_PEM_TYPE_PRIVATE_PKCS8, /* PRIVATE KEY */ + AWS_PEM_TYPE_DH_PARAMETERS, /* X9.42 DH PARAMETERS */ + AWS_PEM_TYPE_DH_PARAMETERS_X942, /* X9.42 DH PARAMETERS */ + AWS_PEM_TYPE_SSL_SESSION_PARAMETERS, /* SSL SESSION PARAMETERS */ + AWS_PEM_TYPE_DSA_PARAMETERS, /* DSA PARAMETERS */ + AWS_PEM_TYPE_ECDSA_PUBLIC, /* ECDSA PUBLIC KEY */ + AWS_PEM_TYPE_EC_PARAMETERS, /* EC PARAMETERS */ + AWS_PEM_TYPE_EC_PRIVATE, /* EC PRIVATE KEY */ + AWS_PEM_TYPE_PARAMETERS, /* PARAMETERS */ + AWS_PEM_TYPE_CMS, /* CMS */ + AWS_PEM_TYPE_SM2_PARAMETERS /* SM2 PARAMETERS */ +}; + +/* + * Describes PEM object decoded from file. + * data points to raw data bytes of object (decoding will do additional base 64 + * decoding for each object). + * type will be set to object type or to AWS_PEM_TYPE_UNKNOWN if it could not + * figure out type. + * type_string is the string between -----BEGIN and ----- + */ +struct aws_pem_object { + enum aws_pem_object_type type; + struct aws_string *type_string; + struct aws_byte_buf data; +}; + +/** + * Cleans up elements of pem_objects list 'aws_pem_objects_init_from_file_contents()' + * and 'aws_pem_objects_init_from_file_path()'. 
+ */ +AWS_IO_API void aws_pem_objects_clean_up(struct aws_array_list *pem_objects); + +/** + * Decodes PEM data and reads objects sequentially adding them to pem_objects. + * If it comes across an object it cannot read, list of all object read until + * that point is returned. + * If no objects can be read from PEM or objects could not be base 64 decoded, + * AWS_ERROR_PEM_MALFORMED is raised. + * out_pem_objects stores aws_pem_object struct by value. + * Function will initialize pem_objects list. + * This code is slow, and it allocates, so please try + * not to call this in the middle of something that needs to be fast or resource sensitive. + */ +AWS_IO_API int aws_pem_objects_init_from_file_contents( + struct aws_array_list *pem_objects, + struct aws_allocator *alloc, + struct aws_byte_cursor pem_cursor); + +/** + * Decodes PEM data from file and reads objects sequentially adding them to pem_objects. + * If it comes across an object it cannot read, list of all object read until + * that point is returned. + * If no objects can be read from PEM or objects could not be base 64 decoded, + * AWS_ERROR_PEM_MALFORMED is raised. + * out_pem_objects stores aws_pem_object struct by value. + * Function will initialize pem_objects list. + * This code is slow, and it allocates, so please try + * not to call this in the middle of something that needs to be fast or resource sensitive. 
+ */ +AWS_IO_API int aws_pem_objects_init_from_file_path( + struct aws_array_list *pem_objects, + struct aws_allocator *allocator, + const char *filename); + +AWS_EXTERN_C_END +#endif /* AWS_IO_PEM_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/pipe.h b/contrib/restricted/aws/aws-c-io/include/aws/io/pipe.h index 5075650547e..ab9746cc421 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/pipe.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/pipe.h @@ -9,6 +9,8 @@ #include <aws/common/byte_buf.h> #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_event_loop; struct aws_pipe_read_end { @@ -144,5 +146,6 @@ int aws_pipe_get_unique_name(char *dst, size_t dst_size); #endif AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_PIPE_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/pkcs11.h b/contrib/restricted/aws/aws-c-io/include/aws/io/pkcs11.h index 862f0631040..e23192f1f3c 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/pkcs11.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/pkcs11.h @@ -6,6 +6,8 @@ */ #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_allocator; /** @@ -90,5 +92,6 @@ AWS_IO_API void aws_pkcs11_lib_release(struct aws_pkcs11_lib *pkcs11_lib); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_PKCS11_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/private/pki_utils.h b/contrib/restricted/aws/aws-c-io/include/aws/io/private/pki_utils.h index ec1c2c62b26..ac5c4c0ff56 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/private/pki_utils.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/private/pki_utils.h @@ -23,42 +23,12 @@ struct aws_string; AWS_EXTERN_C_BEGIN /** - * Cleans up and securely zeroes out the outputs of 'aws_decode_pem_to_buffer_list()' - * and 'aws_read_and_decode_pem_file_to_buffer_list()' - */ -AWS_IO_API void aws_cert_chain_clean_up(struct aws_array_list *cert_chain); - -/** - * Decodes a PEM file 
and adds the results to 'cert_chain_or_key' if successful. - * Otherwise, 'cert_chain_or_key' will be empty. The type stored in 'cert_chain_or_key' - * is 'struct aws_byte_buf' by value. This code is slow, and it allocates, so please try - * not to call this in the middle of something that needs to be fast or resource sensitive. - */ -AWS_IO_API int aws_decode_pem_to_buffer_list( - struct aws_allocator *alloc, - const struct aws_byte_cursor *pem_cursor, - struct aws_array_list *cert_chain_or_key); - -/** * Returns the path to the directory and file, respectively, which holds the * SSL certificate trust store on the system. */ AWS_IO_API const char *aws_determine_default_pki_dir(void); AWS_IO_API const char *aws_determine_default_pki_ca_file(void); -/** - * Decodes a PEM file at 'filename' and adds the results to 'cert_chain_or_key' if successful. - * Otherwise, 'cert_chain_or_key' will be empty. - * The passed-in parameter 'cert_chain_or_key' should be empty and dynamically initialized array_list - * with item type 'struct aws_byte_buf' in value. - * This code is slow, and it allocates, so please try not to call this in the middle of - * something that needs to be fast or resource sensitive. - */ -AWS_IO_API int aws_read_and_decode_pem_file_to_buffer_list( - struct aws_allocator *alloc, - const char *filename, - struct aws_array_list *cert_chain_or_key); - #ifdef AWS_OS_APPLE # if !defined(AWS_OS_IOS) /** diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/private/tracing.h b/contrib/restricted/aws/aws-c-io/include/aws/io/private/tracing.h new file mode 100644 index 00000000000..99b5432619d --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/private/tracing.h @@ -0,0 +1,22 @@ +#ifndef AWS_IO_TRACING_H +#define AWS_IO_TRACING_H + +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/common/external/ittnotify.h> +#include <aws/io/io.h> + +extern __itt_domain *io_tracing_domain; +extern __itt_string_handle *tracing_input_stream_read; +extern __itt_string_handle *tracing_event_loop_run_tasks; +extern __itt_string_handle *tracing_event_loop_event; +extern __itt_string_handle *tracing_event_loop_events; + +AWS_EXTERN_C_BEGIN + +void aws_io_tracing_init(void); + +AWS_EXTERN_C_END +#endif /* AWS_IO_TRACING_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h b/contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h index 4d7a26f7e35..3d63c35e61a 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h @@ -9,6 +9,8 @@ #include <aws/common/atomics.h> #include <aws/common/byte_buf.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_retry_strategy; struct aws_retry_token; struct aws_event_loop_group; @@ -108,8 +110,10 @@ struct aws_exponential_backoff_retry_options { struct aws_event_loop_group *el_group; /** Max retries to allow. The default value is 10 */ size_t max_retries; - /** Scaling factor to add for the backoff. Default is 25ms */ + /** Scaling factor to add for the backoff. Default is 500ms */ uint32_t backoff_scale_factor_ms; + /** Max retry backoff in seconds. Default is 20 seconds */ + uint32_t max_backoff_secs; /** Jitter mode to use, see comments for aws_exponential_backoff_jitter_mode. 
* Default is AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT */ enum aws_exponential_backoff_jitter_mode jitter_mode; @@ -232,5 +236,6 @@ AWS_IO_API struct aws_retry_strategy *aws_retry_strategy_new_standard( const struct aws_standard_retry_options *config); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_CLIENT_RETRY_STRATEGY_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/shared_library.h b/contrib/restricted/aws/aws-c-io/include/aws/io/shared_library.h index 323afed606f..c00eb98af37 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/shared_library.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/shared_library.h @@ -8,6 +8,8 @@ #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_byte_cursor; /* @@ -44,5 +46,6 @@ int aws_shared_library_find_function( aws_generic_function *function_address); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_SHARED_LIBRARY_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/socket.h b/contrib/restricted/aws/aws-c-io/include/aws/io/socket.h index b4f3200803b..a6223b05e1e 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/socket.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/socket.h @@ -8,6 +8,8 @@ #include <aws/io/channel.h> #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + enum aws_socket_domain { AWS_SOCKET_IPV4, AWS_SOCKET_IPV6, @@ -96,7 +98,7 @@ typedef void(aws_socket_on_readable_fn)(struct aws_socket *socket, int error_cod #endif struct aws_socket_endpoint { char address[AWS_ADDRESS_MAX_LEN]; - uint16_t port; + uint32_t port; }; struct aws_socket { @@ -300,6 +302,29 @@ AWS_IO_API int aws_socket_get_error(struct aws_socket *socket); */ AWS_IO_API bool aws_socket_is_open(struct aws_socket *socket); +/** + * Raises AWS_IO_SOCKET_INVALID_ADDRESS and logs an error if connecting to this port is illegal. + * For example, port must be in range 1-65535 to connect with IPv4. 
+ * These port values would fail eventually in aws_socket_connect(), + * but you can use this function to validate earlier. + */ +AWS_IO_API int aws_socket_validate_port_for_connect(uint32_t port, enum aws_socket_domain domain); + +/** + * Raises AWS_IO_SOCKET_INVALID_ADDRESS and logs an error if binding to this port is illegal. + * For example, port must in range 0-65535 to bind with IPv4. + * These port values would fail eventually in aws_socket_bind(), + * but you can use this function to validate earlier. + */ +AWS_IO_API int aws_socket_validate_port_for_bind(uint32_t port, enum aws_socket_domain domain); + +/** + * Assigns a random address (UUID) for use with AWS_SOCKET_LOCAL (Unix Domain Sockets). + * For use in internal tests only. + */ +AWS_IO_API void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint); + AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_SOCKET_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/socket_channel_handler.h b/contrib/restricted/aws/aws-c-io/include/aws/io/socket_channel_handler.h index e837b07849c..a9f6a23cb89 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/socket_channel_handler.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/socket_channel_handler.h @@ -7,6 +7,8 @@ #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_socket; struct aws_channel_handler; struct aws_channel_slot; @@ -28,5 +30,6 @@ AWS_IO_API struct aws_channel_handler *aws_socket_handler_new( AWS_IO_API const struct aws_socket *aws_socket_handler_get_socket(const struct aws_channel_handler *handler); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_SOCKET_CHANNEL_HANDLER_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/statistics.h b/contrib/restricted/aws/aws-c-io/include/aws/io/statistics.h index ce63a7ca374..62217e1764f 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/statistics.h +++ 
b/contrib/restricted/aws/aws-c-io/include/aws/io/statistics.h @@ -11,6 +11,8 @@ #include <aws/common/statistics.h> #include <aws/io/tls_channel_handler.h> +AWS_PUSH_SANE_WARNING_LEVEL + enum aws_crt_io_statistics_category { AWSCRT_STAT_CAT_SOCKET = AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(AWS_C_IO_PACKAGE_ID), AWSCRT_STAT_CAT_TLS, @@ -74,5 +76,6 @@ AWS_IO_API void aws_crt_statistics_tls_reset(struct aws_crt_statistics_tls *stats); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_STATISTICS_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/stream.h b/contrib/restricted/aws/aws-c-io/include/aws/io/stream.h index ff86de970a5..711537d02a6 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/stream.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/stream.h @@ -9,6 +9,8 @@ #include <aws/common/ref_count.h> #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_input_stream; struct aws_byte_buf; @@ -129,5 +131,6 @@ AWS_IO_API struct aws_input_stream *aws_input_stream_new_from_file( AWS_IO_API struct aws_input_stream *aws_input_stream_new_from_open_file(struct aws_allocator *allocator, FILE *file); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_STREAM_H */ diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h b/contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h index bb0eed7d395..9c35864a17f 100644 --- a/contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h +++ b/contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h @@ -8,6 +8,10 @@ #include <aws/common/ref_count.h> #include <aws/io/io.h> +AWS_PUSH_SANE_WARNING_LEVEL + +#define AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE 0x01 + struct aws_channel_slot; struct aws_channel_handler; struct aws_pkcs11_session; @@ -254,8 +258,6 @@ struct aws_tls_negotiated_protocol_message { struct aws_byte_buf protocol; }; -static const int AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE = 0x01; - typedef struct 
aws_channel_handler *( *aws_tls_on_protocol_negotiated)(struct aws_channel_slot *new_slot, struct aws_byte_buf *protocol, void *user_data); @@ -802,18 +804,6 @@ AWS_IO_API struct aws_tls_ctx *aws_tls_ctx_acquire(struct aws_tls_ctx *ctx); AWS_IO_API void aws_tls_ctx_release(struct aws_tls_ctx *ctx); /** - * Not necessary if you are installing more handlers into the channel, but if you just want to have TLS for arbitrary - * data and use the channel handler directly, this function allows you to write data to the channel and have it - * encrypted. - */ -AWS_IO_API int aws_tls_handler_write( - struct aws_channel_handler *handler, - struct aws_channel_slot *slot, - struct aws_byte_buf *buf, - aws_channel_on_message_write_completed_fn *on_write_completed, - void *completion_user_data); - -/** * Returns a byte buffer by copy of the negotiated protocols. If there is no agreed upon protocol, len will be 0 and * buffer will be NULL. */ @@ -914,5 +904,6 @@ AWS_IO_API const char *aws_tls_key_operation_type_str(enum aws_tls_key_operation_type operation_type); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_TLS_CHANNEL_HANDLER_H */ diff --git a/contrib/restricted/aws/aws-c-io/source/async_stream.c b/contrib/restricted/aws/aws-c-io/source/async_stream.c new file mode 100644 index 00000000000..6422bb84705 --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/source/async_stream.c @@ -0,0 +1,153 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include <aws/io/async_stream.h> + +#include <aws/common/byte_buf.h> +#include <aws/io/future.h> +#include <aws/io/stream.h> + +void aws_async_input_stream_init_base( + struct aws_async_input_stream *stream, + struct aws_allocator *alloc, + const struct aws_async_input_stream_vtable *vtable, + void *impl) { + + AWS_PRECONDITION(stream); + AWS_PRECONDITION(alloc); + AWS_PRECONDITION(vtable); + AWS_PRECONDITION(vtable->read); + AWS_PRECONDITION(vtable->destroy); + + AWS_ZERO_STRUCT(*stream); + stream->alloc = alloc; + stream->vtable = vtable; + stream->impl = impl; + aws_ref_count_init(&stream->ref_count, stream, (aws_simple_completion_callback *)vtable->destroy); +} + +struct aws_async_input_stream *aws_async_input_stream_acquire(struct aws_async_input_stream *stream) { + if (stream != NULL) { + aws_ref_count_acquire(&stream->ref_count); + } + return stream; +} + +struct aws_async_input_stream *aws_async_input_stream_release(struct aws_async_input_stream *stream) { + if (stream) { + aws_ref_count_release(&stream->ref_count); + } + return NULL; +} + +struct aws_future_bool *aws_async_input_stream_read(struct aws_async_input_stream *stream, struct aws_byte_buf *dest) { + AWS_PRECONDITION(stream); + AWS_PRECONDITION(dest); + + /* Ensure the buffer has space available */ + if (dest->len == dest->capacity) { + struct aws_future_bool *future = aws_future_bool_new(stream->alloc); + aws_future_bool_set_error(future, AWS_ERROR_SHORT_BUFFER); + return future; + } + + struct aws_future_bool *future = stream->vtable->read(stream, dest); + AWS_POSTCONDITION(future != NULL); + return future; +} + +/* Data to perform the aws_async_input_stream_read_to_fill() job */ +struct aws_async_input_stream_fill_job { + struct aws_allocator *alloc; + struct aws_async_input_stream *stream; + struct aws_byte_buf *dest; + /* Future for each read() step */ + struct aws_future_bool *read_step_future; + /* Future to set when this fill job completes */ + struct aws_future_bool 
*on_complete_future; +}; + +static void s_async_stream_fill_job_complete( + struct aws_async_input_stream_fill_job *fill_job, + bool eof, + int error_code) { + + if (error_code) { + aws_future_bool_set_error(fill_job->on_complete_future, error_code); + } else { + aws_future_bool_set_result(fill_job->on_complete_future, eof); + } + aws_future_bool_release(fill_job->on_complete_future); + aws_async_input_stream_release(fill_job->stream); + aws_mem_release(fill_job->alloc, fill_job); +} + +/* Call read() in a loop. + * It would be simpler to set a completion callback for each read() call, + * but this risks our call stack growing large if there are many small, synchronous, reads. + * So be complicated and loop until a read() ) call is actually async, + * and only then set the completion callback (which is this same function, where we resume looping). */ +static void s_async_stream_fill_job_loop(void *user_data) { + struct aws_async_input_stream_fill_job *fill_job = user_data; + + while (true) { + /* Process read_step_future from previous iteration of loop. + * It's NULL the first time the job ever enters the loop. + * But it's set in subsequent runs of the loop, + * and when this is a read_step_future completion callback. */ + if (fill_job->read_step_future) { + if (aws_future_bool_register_callback_if_not_done( + fill_job->read_step_future, s_async_stream_fill_job_loop, fill_job)) { + + /* not done, we'll resume this loop when callback fires */ + return; + } + + /* read_step_future is done */ + int error_code = aws_future_bool_get_error(fill_job->read_step_future); + bool eof = error_code ? false : aws_future_bool_get_result(fill_job->read_step_future); + bool reached_capacity = fill_job->dest->len == fill_job->dest->capacity; + fill_job->read_step_future = aws_future_bool_release(fill_job->read_step_future); /* release and NULL */ + + if (error_code || eof || reached_capacity) { + /* job complete! 
*/ + s_async_stream_fill_job_complete(fill_job, eof, error_code); + return; + } + } + + /* Kick off a read, which may or may not complete async */ + fill_job->read_step_future = aws_async_input_stream_read(fill_job->stream, fill_job->dest); + } +} + +struct aws_future_bool *aws_async_input_stream_read_to_fill( + struct aws_async_input_stream *stream, + struct aws_byte_buf *dest) { + + AWS_PRECONDITION(stream); + AWS_PRECONDITION(dest); + + struct aws_future_bool *future = aws_future_bool_new(stream->alloc); + + /* Ensure the buffer has space available */ + if (dest->len == dest->capacity) { + aws_future_bool_set_error(future, AWS_ERROR_SHORT_BUFFER); + return future; + } + + /* Prepare for async job */ + struct aws_async_input_stream_fill_job *fill_job = + aws_mem_calloc(stream->alloc, 1, sizeof(struct aws_async_input_stream_fill_job)); + fill_job->alloc = stream->alloc; + fill_job->stream = aws_async_input_stream_acquire(stream); + fill_job->dest = dest; + fill_job->on_complete_future = aws_future_bool_acquire(future); + + /* Kick off work */ + s_async_stream_fill_job_loop(fill_job); + + return future; +} diff --git a/contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c b/contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c index 43130fa4971..33a517e7b9b 100644 --- a/contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c +++ b/contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c @@ -7,6 +7,7 @@ #include <aws/io/logging.h> +#include <aws/cal/cal.h> #include <aws/common/atomics.h> #include <aws/common/clock.h> #include <aws/common/mutex.h> @@ -821,6 +822,12 @@ static int aws_event_loop_listen_for_io_events(int kq_fd, struct kevent kevents[ return kevent(kq_fd, NULL /*changelist*/, 0 /*nchanges*/, kevents /*eventlist*/, MAX_EVENTS /*nevents*/, timeout); } +static void s_aws_kqueue_cleanup_aws_lc_thread_local_state(void *user_data) { + (void)user_data; + + aws_cal_thread_clean_up(); +} + static void aws_event_loop_thread(void 
*user_data) { struct aws_event_loop *event_loop = user_data; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); @@ -852,6 +859,8 @@ static void aws_event_loop_thread(void *user_data) { DEFAULT_TIMEOUT_SEC, MAX_EVENTS); + aws_thread_current_at_exit(s_aws_kqueue_cleanup_aws_lc_thread_local_state, NULL); + while (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING) { int num_io_handle_events = 0; bool should_process_cross_thread_data = false; diff --git a/contrib/restricted/aws/aws-c-io/source/channel.c b/contrib/restricted/aws/aws-c-io/source/channel.c index b741513ccda..36a3975b2ea 100644 --- a/contrib/restricted/aws/aws-c-io/source/channel.c +++ b/contrib/restricted/aws/aws-c-io/source/channel.c @@ -743,7 +743,7 @@ int aws_channel_slot_insert_end(struct aws_channel *channel, struct aws_channel_ } AWS_ASSERT(0); - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_STATE); } int aws_channel_slot_insert_left(struct aws_channel_slot *slot, struct aws_channel_slot *to_add) { diff --git a/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c b/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c index 4eac9174578..2ccd3873aca 100644 --- a/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c +++ b/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c @@ -118,7 +118,7 @@ struct client_connection_args { aws_client_bootstrap_on_channel_event_fn *shutdown_callback; struct client_channel_data channel_data; struct aws_socket_options outgoing_options; - uint16_t outgoing_port; + uint32_t outgoing_port; struct aws_string *host_name; void *user_data; uint8_t addresses_count; @@ -182,10 +182,14 @@ static struct aws_event_loop *s_get_connection_event_loop(struct client_connecti return aws_event_loop_group_get_next_loop(args->bootstrap->event_loop_group); } -static void s_connection_args_setup_callback( +static void s_connect_args_setup_callback_safe( struct client_connection_args *args, int error_code, struct 
aws_channel *channel) { + + AWS_FATAL_ASSERT( + (args->requested_event_loop == NULL) || aws_event_loop_thread_is_callers_thread(args->requested_event_loop)); + /* setup_callback is always called exactly once */ AWS_FATAL_ASSERT(!args->setup_called); @@ -200,6 +204,75 @@ static void s_connection_args_setup_callback( s_client_connection_args_release(args); } +struct aws_connection_args_setup_callback_task { + struct aws_allocator *allocator; + struct aws_task task; + struct client_connection_args *args; + int error_code; + struct aws_channel *channel; +}; + +static void s_aws_connection_args_setup_callback_task_delete(struct aws_connection_args_setup_callback_task *task) { + if (task == NULL) { + return; + } + + s_client_connection_args_release(task->args); + if (task->channel) { + aws_channel_release_hold(task->channel); + } + + aws_mem_release(task->allocator, task); +} + +void s_aws_connection_args_setup_callback_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + + struct aws_connection_args_setup_callback_task *callback_task = arg; + + if (status == AWS_TASK_STATUS_RUN_READY) { + s_connect_args_setup_callback_safe(callback_task->args, callback_task->error_code, callback_task->channel); + } + + s_aws_connection_args_setup_callback_task_delete(callback_task); +} + +static struct aws_connection_args_setup_callback_task *s_aws_connection_args_setup_callback_task_new( + struct aws_allocator *allocator, + struct client_connection_args *args, + int error_code, + struct aws_channel *channel) { + + struct aws_connection_args_setup_callback_task *task = + aws_mem_calloc(allocator, 1, sizeof(struct aws_connection_args_setup_callback_task)); + task->allocator = allocator; + task->args = s_client_connection_args_acquire(args); + task->error_code = error_code; + task->channel = channel; + if (channel != NULL) { + aws_channel_acquire_hold(channel); + } + + aws_task_init( + &task->task, s_aws_connection_args_setup_callback_task_fn, task, 
"safe connection args setup callback"); + + return task; +} + +static void s_connection_args_setup_callback( + struct client_connection_args *args, + int error_code, + struct aws_channel *channel) { + + if (args->requested_event_loop == NULL || aws_event_loop_thread_is_callers_thread(args->requested_event_loop)) { + s_connect_args_setup_callback_safe(args, error_code, channel); + } else { + struct aws_connection_args_setup_callback_task *callback_task = + s_aws_connection_args_setup_callback_task_new(args->bootstrap->allocator, args, error_code, channel); + aws_event_loop_schedule_task_now(args->requested_event_loop, &callback_task->task); + } +} + static void s_connection_args_creation_callback(struct client_connection_args *args, struct aws_channel *channel) { AWS_FATAL_ASSERT(channel != NULL); @@ -567,10 +640,6 @@ static void s_attempt_connection(struct aws_task *task, void *arg, enum aws_task } struct aws_socket *outgoing_socket = aws_mem_acquire(allocator, sizeof(struct aws_socket)); - if (!outgoing_socket) { - goto socket_alloc_failed; - } - if (aws_socket_init(outgoing_socket, allocator, &task_data->options)) { goto socket_init_failed; } @@ -592,19 +661,27 @@ socket_connect_failed: aws_socket_clean_up(outgoing_socket); socket_init_failed: aws_mem_release(allocator, outgoing_socket); -socket_alloc_failed: - err_code = aws_last_error(); - AWS_LOGF_ERROR( - AWS_LS_IO_CHANNEL_BOOTSTRAP, - "id=%p: failed to create socket with error %d", - (void *)task_data->args->bootstrap, - err_code); task_cancelled: + err_code = aws_last_error(); task_data->args->failed_count++; /* if this is the last attempted connection and it failed, notify the user */ if (task_data->args->failed_count == task_data->args->addresses_count) { + AWS_LOGF_ERROR( + AWS_LS_IO_CHANNEL_BOOTSTRAP, + "id=%p: Last attempt failed to create socket with error %d", + (void *)task_data->args->bootstrap, + err_code); s_connection_args_setup_callback(task_data->args, err_code, NULL); + } else { + 
AWS_LOGF_DEBUG( + AWS_LS_IO_CHANNEL_BOOTSTRAP, + "id=%p: Socket connect attempt %d/%d failed with error %d. More attempts ongoing...", + (void *)task_data->args->bootstrap, + task_data->args->failed_count, + task_data->args->addresses_count, + err_code); } + s_client_connection_args_release(task_data->args); cleanup_task: @@ -760,14 +837,14 @@ int aws_client_bootstrap_new_socket_channel(struct aws_socket_channel_bootstrap_ } const char *host_name = options->host_name; - uint16_t port = options->port; + uint32_t port = options->port; AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, - "id=%p: attempting to initialize a new client channel to %s:%d", + "id=%p: attempting to initialize a new client channel to %s:%u", (void *)bootstrap, host_name, - (int)port); + port); aws_ref_count_init( &client_connection_args->ref_count, @@ -1359,10 +1436,10 @@ struct aws_socket *aws_server_bootstrap_new_socket_listener( AWS_LOGF_INFO( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: attempting to initialize a new " - "server socket listener for %s:%d", + "server socket listener for %s:%u", (void *)bootstrap_options->bootstrap, bootstrap_options->host_name, - (int)bootstrap_options->port); + bootstrap_options->port); aws_ref_count_init( &server_connection_args->ref_count, diff --git a/contrib/restricted/aws/aws-c-io/source/event_loop.c b/contrib/restricted/aws/aws-c-io/source/event_loop.c index 5870b87e8d8..1e7aef676cc 100644 --- a/contrib/restricted/aws/aws-c-io/source/event_loop.c +++ b/contrib/restricted/aws/aws-c-io/source/event_loop.c @@ -165,12 +165,16 @@ static struct aws_event_loop_group *s_event_loop_group_new( return el_group; -on_error: +on_error:; + /* cache the error code to prevent any potential side effects */ + int cached_error_code = aws_last_error(); aws_mem_release(alloc, usable_cpus); s_aws_event_loop_group_shutdown_sync(el_group); s_event_loop_group_thread_exit(el_group); + /* raise the cached error code */ + aws_raise_error(cached_error_code); return NULL; } @@ -398,7 +402,7 @@ 
int aws_event_loop_fetch_local_object( return AWS_OP_SUCCESS; } - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } int aws_event_loop_put_local_object(struct aws_event_loop *event_loop, struct aws_event_loop_local_object *obj) { diff --git a/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c b/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c index 298cca9af1a..cf247226995 100644 --- a/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c +++ b/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c @@ -26,6 +26,7 @@ struct exponential_backoff_retry_token { struct aws_atomic_var last_backoff; size_t max_retries; uint64_t backoff_scale_factor_ns; + uint64_t maximum_backoff_ns; enum aws_exponential_backoff_jitter_mode jitter_mode; /* Let's not make this worse by constantly moving across threads if we can help it */ struct aws_event_loop *bound_loop; @@ -139,6 +140,8 @@ static int s_exponential_retry_acquire_token( backoff_retry_token->max_retries = exponential_backoff_strategy->config.max_retries; backoff_retry_token->backoff_scale_factor_ns = aws_timestamp_convert( exponential_backoff_strategy->config.backoff_scale_factor_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); + backoff_retry_token->maximum_backoff_ns = aws_timestamp_convert( + exponential_backoff_strategy->config.max_backoff_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); backoff_retry_token->jitter_mode = exponential_backoff_strategy->config.jitter_mode; backoff_retry_token->generate_random = exponential_backoff_strategy->config.generate_random; backoff_retry_token->generate_random_impl = exponential_backoff_strategy->config.generate_random_impl; @@ -184,7 +187,8 @@ typedef uint64_t(compute_backoff_fn)(struct exponential_backoff_retry_token *tok static uint64_t s_compute_no_jitter(struct exponential_backoff_retry_token *token) { uint64_t retry_count = 
aws_min_u64(aws_atomic_load_int(&token->current_retry_count), 63); - return aws_mul_u64_saturating((uint64_t)1 << retry_count, token->backoff_scale_factor_ns); + uint64_t backoff_ns = aws_mul_u64_saturating((uint64_t)1 << retry_count, token->backoff_scale_factor_ns); + return aws_min_u64(backoff_ns, token->maximum_backoff_ns); } static uint64_t s_compute_full_jitter(struct exponential_backoff_retry_token *token) { @@ -198,8 +202,8 @@ static uint64_t s_compute_deccorelated_jitter(struct exponential_backoff_retry_t if (!last_backoff_val) { return s_compute_full_jitter(token); } - - return s_random_in_range(token->backoff_scale_factor_ns, aws_mul_u64_saturating(last_backoff_val, 3), token); + uint64_t backoff_ns = aws_min_u64(token->maximum_backoff_ns, aws_mul_u64_saturating(last_backoff_val, 3)); + return s_random_in_range(token->backoff_scale_factor_ns, backoff_ns, token); } static compute_backoff_fn *s_backoff_compute_table[] = { @@ -369,7 +373,11 @@ struct aws_retry_strategy *aws_retry_strategy_new_exponential_backoff( } if (!exponential_backoff_strategy->config.backoff_scale_factor_ms) { - exponential_backoff_strategy->config.backoff_scale_factor_ms = 25; + exponential_backoff_strategy->config.backoff_scale_factor_ms = 500; + } + + if (!exponential_backoff_strategy->config.max_backoff_secs) { + exponential_backoff_strategy->config.max_backoff_secs = 20; } if (config->shutdown_options) { diff --git a/contrib/restricted/aws/aws-c-io/source/future.c b/contrib/restricted/aws/aws-c-io/source/future.c new file mode 100644 index 00000000000..be213184be6 --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/source/future.c @@ -0,0 +1,543 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/io/future.h> + +#include <aws/common/condition_variable.h> +#include <aws/common/mutex.h> +#include <aws/common/ref_count.h> +#include <aws/common/task_scheduler.h> +#include <aws/io/channel.h> +#include <aws/io/event_loop.h> + +enum aws_future_type { + AWS_FUTURE_T_BY_VALUE, + AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP, + AWS_FUTURE_T_POINTER, + AWS_FUTURE_T_POINTER_WITH_DESTROY, + AWS_FUTURE_T_POINTER_WITH_RELEASE, +}; + +struct aws_future_callback_data { + aws_future_callback_fn *fn; + void *user_data; + union aws_future_callback_union { + struct aws_event_loop *event_loop; + struct aws_channel *channel; + } u; + enum aws_future_callback_type { + AWS_FUTURE_IMMEDIATE_CALLBACK, + AWS_FUTURE_EVENT_LOOP_CALLBACK, + AWS_FUTURE_CHANNEL_CALLBACK, + } type; +}; + +/* When allocating aws_future<T> on the heap, we make 1 allocation containing: + * aws_future_impl followed by T */ +struct aws_future_impl { + struct aws_allocator *alloc; + struct aws_ref_count ref_count; + struct aws_mutex lock; + struct aws_condition_variable wait_cvar; + struct aws_future_callback_data callback; + union { + aws_future_impl_result_clean_up_fn *clean_up; + aws_future_impl_result_destroy_fn *destroy; + aws_future_impl_result_release_fn *release; + } result_dtor; + int error_code; + /* sum of bit fields should be 32 */ +#define BIT_COUNT_FOR_SIZEOF_RESULT 27 + unsigned int sizeof_result : BIT_COUNT_FOR_SIZEOF_RESULT; + unsigned int type : 3; /* aws_future_type */ + unsigned int is_done : 1; + unsigned int owns_result : 1; +}; + +static void s_future_impl_result_dtor(struct aws_future_impl *future, void *result_addr) { + switch (future->type) { + case AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP: { + future->result_dtor.clean_up(result_addr); + break; + } break; + + case AWS_FUTURE_T_POINTER_WITH_DESTROY: { + void *result = *(void **)result_addr; + if (result) { + future->result_dtor.destroy(result); + } + } break; + + case AWS_FUTURE_T_POINTER_WITH_RELEASE: { + void *result = *(void 
**)result_addr; + if (result) { + future->result_dtor.release(result); + } + } break; + + default: + break; + } +} + +static void s_future_impl_destroy(void *user_data) { + struct aws_future_impl *future = user_data; + if (future->owns_result && !future->error_code) { + s_future_impl_result_dtor(future, aws_future_impl_get_result_address(future)); + } + aws_condition_variable_clean_up(&future->wait_cvar); + aws_mutex_clean_up(&future->lock); + aws_mem_release(future->alloc, future); +} + +static struct aws_future_impl *s_future_impl_new(struct aws_allocator *alloc, size_t sizeof_result) { + size_t total_size = sizeof(struct aws_future_impl) + sizeof_result; + struct aws_future_impl *future = aws_mem_calloc(alloc, 1, total_size); + future->alloc = alloc; + + /* we store sizeof_result in a bit field, ensure the number will fit */ + AWS_ASSERT(sizeof_result <= (UINT_MAX >> (32 - BIT_COUNT_FOR_SIZEOF_RESULT))); + future->sizeof_result = (unsigned int)sizeof_result; + + aws_ref_count_init(&future->ref_count, future, s_future_impl_destroy); + aws_mutex_init(&future->lock); + aws_condition_variable_init(&future->wait_cvar); + return future; +} + +struct aws_future_impl *aws_future_impl_new_by_value(struct aws_allocator *alloc, size_t sizeof_result) { + struct aws_future_impl *future = s_future_impl_new(alloc, sizeof_result); + future->type = AWS_FUTURE_T_BY_VALUE; + return future; +} + +struct aws_future_impl *aws_future_impl_new_by_value_with_clean_up( + struct aws_allocator *alloc, + size_t sizeof_result, + aws_future_impl_result_clean_up_fn *result_clean_up) { + + AWS_ASSERT(result_clean_up); + struct aws_future_impl *future = s_future_impl_new(alloc, sizeof_result); + future->type = AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP; + future->result_dtor.clean_up = result_clean_up; + return future; +} + +struct aws_future_impl *aws_future_impl_new_pointer(struct aws_allocator *alloc) { + struct aws_future_impl *future = s_future_impl_new(alloc, sizeof(void *)); + future->type = 
AWS_FUTURE_T_POINTER; + return future; +} + +struct aws_future_impl *aws_future_impl_new_pointer_with_destroy( + struct aws_allocator *alloc, + aws_future_impl_result_destroy_fn *result_destroy) { + + AWS_ASSERT(result_destroy); + struct aws_future_impl *future = s_future_impl_new(alloc, sizeof(void *)); + future->type = AWS_FUTURE_T_POINTER_WITH_DESTROY; + future->result_dtor.destroy = result_destroy; + return future; +} + +struct aws_future_impl *aws_future_impl_new_pointer_with_release( + struct aws_allocator *alloc, + aws_future_impl_result_release_fn *result_release) { + + AWS_ASSERT(result_release); + struct aws_future_impl *future = s_future_impl_new(alloc, sizeof(void *)); + future->type = AWS_FUTURE_T_POINTER_WITH_RELEASE; + future->result_dtor.release = result_release; + return future; +} + +struct aws_future_impl *aws_future_impl_release(struct aws_future_impl *future) { + if (future != NULL) { + aws_ref_count_release(&future->ref_count); + } + return NULL; +} + +struct aws_future_impl *aws_future_impl_acquire(struct aws_future_impl *future) { + if (future != NULL) { + aws_ref_count_acquire(&future->ref_count); + } + return future; +} + +bool aws_future_impl_is_done(const struct aws_future_impl *future) { + AWS_ASSERT(future); + + /* this function is conceptually const, but we need to hold the lock a moment */ + struct aws_mutex *mutable_lock = (struct aws_mutex *)&future->lock; + + /* BEGIN CRITICAL SECTION */ + aws_mutex_lock(mutable_lock); + bool is_done = future->is_done != 0; + aws_mutex_unlock(mutable_lock); + /* END CRITICAL SECTION */ + + return is_done; +} + +int aws_future_impl_get_error(const struct aws_future_impl *future) { + AWS_ASSERT(future != NULL); + /* not bothering with lock, none of this can change after future is done */ + AWS_FATAL_ASSERT(future->is_done && "Cannot get error before future is done"); + return future->error_code; +} + +void *aws_future_impl_get_result_address(const struct aws_future_impl *future) { + 
AWS_ASSERT(future != NULL); + /* not bothering with lock, none of this can change after future is done */ + AWS_FATAL_ASSERT(future->is_done && "Cannot get result before future is done"); + AWS_FATAL_ASSERT(!future->error_code && "Cannot get result from future that failed with an error"); + AWS_FATAL_ASSERT(future->owns_result && "Result was already moved from future"); + + const struct aws_future_impl *address_of_memory_after_this_struct = future + 1; + void *result_addr = (void *)address_of_memory_after_this_struct; + return result_addr; +} + +void aws_future_impl_get_result_by_move(struct aws_future_impl *future, void *dst_address) { + void *result_addr = aws_future_impl_get_result_address(future); + memcpy(dst_address, result_addr, future->sizeof_result); + memset(result_addr, 0, future->sizeof_result); + future->owns_result = false; +} + +/* Data for invoking callback as a task on an event-loop */ +struct aws_future_event_loop_callback_job { + struct aws_allocator *alloc; + struct aws_task task; + struct aws_event_loop *event_loop; + aws_future_callback_fn *callback; + void *user_data; +}; + +static void s_future_impl_event_loop_callback_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + struct aws_future_event_loop_callback_job *job = arg; + job->callback(job->user_data); + // TODO: aws_event_loop_release(job->event_loop); + aws_mem_release(job->alloc, job); +} + +/* Data for invoking callback as a task on an aws_channel */ +struct aws_future_channel_callback_job { + struct aws_allocator *alloc; + struct aws_channel_task task; + struct aws_channel *channel; + aws_future_callback_fn *callback; + void *user_data; +}; + +static void s_future_impl_channel_callback_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + struct aws_future_channel_callback_job *job = arg; + job->callback(job->user_data); + aws_channel_release_hold(job->channel); + 
aws_mem_release(job->alloc, job); +} + +static void s_future_impl_invoke_callback(struct aws_future_callback_data *callback, struct aws_allocator *alloc) { + AWS_ASSERT(callback->fn); + + switch (callback->type) { + case AWS_FUTURE_IMMEDIATE_CALLBACK: { + callback->fn(callback->user_data); + } break; + + case AWS_FUTURE_EVENT_LOOP_CALLBACK: { + /* Schedule the callback as a task on the event-loop */ + struct aws_future_event_loop_callback_job *job = + aws_mem_calloc(alloc, 1, sizeof(struct aws_future_event_loop_callback_job)); + job->alloc = alloc; + aws_task_init(&job->task, s_future_impl_event_loop_callback_task, job, "aws_future_event_loop_callback"); + job->event_loop = callback->u.event_loop; + job->callback = callback->fn; + job->user_data = callback->user_data; + + aws_event_loop_schedule_task_now(callback->u.event_loop, &job->task); + } break; + + case AWS_FUTURE_CHANNEL_CALLBACK: { + /* Schedule the callback as a task on the channel */ + struct aws_future_channel_callback_job *job = + aws_mem_calloc(alloc, 1, sizeof(struct aws_future_channel_callback_job)); + job->alloc = alloc; + aws_channel_task_init(&job->task, s_future_impl_channel_callback_task, job, "aws_future_channel_callback"); + job->channel = callback->u.channel; + job->callback = callback->fn; + job->user_data = callback->user_data; + + aws_channel_schedule_task_now(callback->u.channel, &job->task); + } break; + } +} + +static void s_future_impl_set_done(struct aws_future_impl *future, void *src_address, int error_code) { + bool is_error = error_code != 0; + + /* BEGIN CRITICAL SECTION */ + aws_mutex_lock(&future->lock); + + struct aws_future_callback_data callback = future->callback; + + bool first_time = !future->is_done; + if (first_time) { + future->is_done = true; + AWS_ZERO_STRUCT(future->callback); + if (is_error) { + future->error_code = error_code; + } else { + future->owns_result = true; + AWS_FATAL_ASSERT(src_address != NULL); + memcpy(aws_future_impl_get_result_address(future), 
src_address, future->sizeof_result); + } + + aws_condition_variable_notify_all(&future->wait_cvar); + } + + aws_mutex_unlock(&future->lock); + /* END CRITICAL SECTION */ + + if (first_time) { + /* if callback was registered, invoke it now, outside of critical section to avoid deadlock */ + if (callback.fn != NULL) { + s_future_impl_invoke_callback(&callback, future->alloc); + } + } else if (!error_code) { + /* future was already done, so just destroy this newer result */ + s_future_impl_result_dtor(future, src_address); + } +} + +void aws_future_impl_set_error(struct aws_future_impl *future, int error_code) { + AWS_ASSERT(future); + + /* handle recoverable usage error */ + AWS_ASSERT(error_code != 0); + if (AWS_UNLIKELY(error_code == 0)) { + error_code = AWS_ERROR_UNKNOWN; + } + + s_future_impl_set_done(future, NULL /*src_address*/, error_code); +} + +void aws_future_impl_set_result_by_move(struct aws_future_impl *future, void *src_address) { + AWS_ASSERT(future); + AWS_ASSERT(src_address); + s_future_impl_set_done(future, src_address, 0 /*error_code*/); + + /* the future takes ownership of the result. + * zero out memory at the src_address to reinforce this transfer of ownership. 
*/ + memset(src_address, 0, future->sizeof_result); +} + +/* Returns true if callback was registered, or false if callback was ignored + * because the the future is already done and invoke_if_already_done==false */ +static bool s_future_impl_register_callback( + struct aws_future_impl *future, + struct aws_future_callback_data *callback, + bool invoke_if_already_done) { + + /* BEGIN CRITICAL SECTION */ + aws_mutex_lock(&future->lock); + + AWS_FATAL_ASSERT(future->callback.fn == NULL && "Future done callback must only be set once"); + + bool already_done = future->is_done != 0; + + /* if not done, store callback for later */ + if (!already_done) { + future->callback = *callback; + } + + aws_mutex_unlock(&future->lock); + /* END CRITICAL SECTION */ + + /* if already done, invoke callback now */ + if (already_done && invoke_if_already_done) { + s_future_impl_invoke_callback(callback, future->alloc); + } + + return !already_done || invoke_if_already_done; +} + +void aws_future_impl_register_callback( + struct aws_future_impl *future, + aws_future_callback_fn *on_done, + void *user_data) { + + AWS_ASSERT(future); + AWS_ASSERT(on_done); + + struct aws_future_callback_data callback = { + .fn = on_done, + .user_data = user_data, + .type = AWS_FUTURE_IMMEDIATE_CALLBACK, + }; + s_future_impl_register_callback(future, &callback, true /*invoke_if_already_done*/); +} + +bool aws_future_impl_register_callback_if_not_done( + struct aws_future_impl *future, + aws_future_callback_fn *on_done, + void *user_data) { + + AWS_ASSERT(future); + AWS_ASSERT(on_done); + + struct aws_future_callback_data callback = { + .fn = on_done, + .user_data = user_data, + .type = AWS_FUTURE_IMMEDIATE_CALLBACK, + }; + return s_future_impl_register_callback(future, &callback, false /*invoke_if_already_done*/); +} + +void aws_future_impl_register_event_loop_callback( + struct aws_future_impl *future, + struct aws_event_loop *event_loop, + aws_future_callback_fn *on_done, + void *user_data) { + + 
AWS_ASSERT(future); + AWS_ASSERT(event_loop); + AWS_ASSERT(on_done); + + // TODO: aws_event_loop_acquire(event_loop); + + struct aws_future_callback_data callback = { + .fn = on_done, + .user_data = user_data, + .type = AWS_FUTURE_EVENT_LOOP_CALLBACK, + .u = {.event_loop = event_loop}, + }; + s_future_impl_register_callback(future, &callback, true /*invoke_if_already_done*/); +} + +void aws_future_impl_register_channel_callback( + struct aws_future_impl *future, + struct aws_channel *channel, + aws_future_callback_fn *on_done, + void *user_data) { + + AWS_ASSERT(future); + AWS_ASSERT(channel); + AWS_ASSERT(on_done); + + aws_channel_acquire_hold(channel); + + struct aws_future_callback_data callback = { + .fn = on_done, + .user_data = user_data, + .type = AWS_FUTURE_CHANNEL_CALLBACK, + .u = {.channel = channel}, + }; + s_future_impl_register_callback(future, &callback, true /*invoke_if_already_done*/); +} + +static bool s_future_impl_is_done_pred(void *user_data) { + struct aws_future_impl *future = user_data; + return future->is_done != 0; +} + +bool aws_future_impl_wait(const struct aws_future_impl *future, uint64_t timeout_ns) { + AWS_ASSERT(future); + + /* this function is conceptually const, but we need to use synchronization primitives */ + struct aws_future_impl *mutable_future = (struct aws_future_impl *)future; + + /* BEGIN CRITICAL SECTION */ + aws_mutex_lock(&mutable_future->lock); + + bool is_done = aws_condition_variable_wait_for_pred( + &mutable_future->wait_cvar, + &mutable_future->lock, + (int64_t)timeout_ns, + s_future_impl_is_done_pred, + mutable_future) == AWS_OP_SUCCESS; + + aws_mutex_unlock(&mutable_future->lock); + /* END CRITICAL SECTION */ + + return is_done; +} + +// AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(aws_future_bool, bool) +struct aws_future_bool *aws_future_bool_new(struct aws_allocator *alloc) { + return (struct aws_future_bool *)aws_future_impl_new_by_value(alloc, sizeof(_Bool)); +} +void aws_future_bool_set_result(struct 
aws_future_bool *future, _Bool result) { + aws_future_impl_set_result_by_move((struct aws_future_impl *)future, &result); +} +_Bool aws_future_bool_get_result(const struct aws_future_bool *future) { + return *(_Bool *)aws_future_impl_get_result_address((const struct aws_future_impl *)future); +} +struct aws_future_bool *aws_future_bool_acquire(struct aws_future_bool *future) { + return (struct aws_future_bool *)aws_future_impl_acquire((struct aws_future_impl *)future); +} +struct aws_future_bool *aws_future_bool_release(struct aws_future_bool *future) { + return (struct aws_future_bool *)aws_future_impl_release((struct aws_future_impl *)future); +} +void aws_future_bool_set_error(struct aws_future_bool *future, int error_code) { + aws_future_impl_set_error((struct aws_future_impl *)future, error_code); +} +_Bool aws_future_bool_is_done(const struct aws_future_bool *future) { + return aws_future_impl_is_done((const struct aws_future_impl *)future); +} +int aws_future_bool_get_error(const struct aws_future_bool *future) { + return aws_future_impl_get_error((const struct aws_future_impl *)future); +} +void aws_future_bool_register_callback( + struct aws_future_bool *future, + aws_future_callback_fn *on_done, + void *user_data) { + aws_future_impl_register_callback((struct aws_future_impl *)future, on_done, user_data); +} +_Bool aws_future_bool_register_callback_if_not_done( + struct aws_future_bool *future, + aws_future_callback_fn *on_done, + void *user_data) { + return aws_future_impl_register_callback_if_not_done((struct aws_future_impl *)future, on_done, user_data); +} +void aws_future_bool_register_event_loop_callback( + struct aws_future_bool *future, + struct aws_event_loop *event_loop, + aws_future_callback_fn *on_done, + void *user_data) { + aws_future_impl_register_event_loop_callback((struct aws_future_impl *)future, event_loop, on_done, user_data); +} +void aws_future_bool_register_channel_callback( + struct aws_future_bool *future, + struct aws_channel 
*channel, + aws_future_callback_fn *on_done, + void *user_data) { + aws_future_impl_register_channel_callback((struct aws_future_impl *)future, channel, on_done, user_data); +} +_Bool aws_future_bool_wait(struct aws_future_bool *future, uint64_t timeout_ns) { + return aws_future_impl_wait((struct aws_future_impl *)future, timeout_ns); +} + +AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(aws_future_size, size_t) + +/** + * aws_future<void> + */ +AWS_FUTURE_T_IMPLEMENTATION_BEGIN(aws_future_void) + +struct aws_future_void *aws_future_void_new(struct aws_allocator *alloc) { + /* Use aws_future<bool> under the hood, to avoid edge-cases with 0-sized result */ + return (struct aws_future_void *)aws_future_bool_new(alloc); +} + +void aws_future_void_set_result(struct aws_future_void *future) { + aws_future_bool_set_result((struct aws_future_bool *)future, false); +} + +AWS_FUTURE_T_IMPLEMENTATION_END(aws_future_void) diff --git a/contrib/restricted/aws/aws-c-io/source/io.c b/contrib/restricted/aws/aws-c-io/source/io.c index 106ec0f9781..c47ce97a940 100644 --- a/contrib/restricted/aws/aws-c-io/source/io.c +++ b/contrib/restricted/aws/aws-c-io/source/io.c @@ -7,6 +7,7 @@ #include <aws/io/logging.h> #include <aws/cal/cal.h> +#include <aws/io/private/tracing.h> #define AWS_DEFINE_ERROR_INFO_IO(CODE, STR) [(CODE)-0x0400] = AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-io") @@ -300,6 +301,11 @@ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_IO( AWS_IO_STREAM_GET_LENGTH_UNSUPPORTED, "Get length is not supported in the underlying I/O source."), + AWS_DEFINE_ERROR_INFO_IO( + AWS_IO_TLS_ERROR_READ_FAILURE, + "Failure during TLS read."), + AWS_DEFINE_ERROR_INFO_IO(AWS_ERROR_PEM_MALFORMED, "Malformed PEM object encountered."), + }; /* clang-format on */ @@ -336,7 +342,7 @@ static struct aws_log_subject_info s_io_log_subject_infos[] = { "standard-retry-strategy", "Subject for standard retry strategy"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_PKCS11, "pkcs11", "Subject for PKCS#11 library 
operations"), -}; + DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_PEM, "pem", "Subject for pem operations")}; static struct aws_log_subject_info_list s_io_log_subject_list = { .subject_list = s_io_log_subject_infos, @@ -356,6 +362,7 @@ void aws_io_library_init(struct aws_allocator *allocator) { aws_register_error_info(&s_list); aws_register_log_subject_info_list(&s_io_log_subject_list); aws_tls_init_static_state(allocator); + aws_io_tracing_init(); } } diff --git a/contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c b/contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c index af1ed765df4..094a7836a99 100644 --- a/contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c +++ b/contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c @@ -5,11 +5,13 @@ #include <aws/io/event_loop.h> +#include <aws/cal/cal.h> #include <aws/common/atomics.h> #include <aws/common/clock.h> #include <aws/common/mutex.h> #include <aws/common/task_scheduler.h> #include <aws/common/thread.h> +#include <aws/io/private/tracing.h> #include <aws/io/logging.h> @@ -562,6 +564,12 @@ static int aws_event_loop_listen_for_io_events(int epoll_fd, struct epoll_event return epoll_wait(epoll_fd, events, MAX_EVENTS, timeout); } +static void s_aws_epoll_cleanup_aws_lc_thread_local_state(void *user_data) { + (void)user_data; + + aws_cal_thread_clean_up(); +} + static void aws_event_loop_thread(void *args) { struct aws_event_loop *event_loop = args; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); @@ -576,6 +584,8 @@ static void aws_event_loop_thread(void *args) { return; } + aws_thread_current_at_exit(s_aws_epoll_cleanup_aws_lc_thread_local_state, NULL); + int timeout = DEFAULT_TIMEOUT; struct epoll_event events[MAX_EVENTS]; @@ -606,6 +616,8 @@ static void aws_event_loop_thread(void *args) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %d events to process.", (void *)event_loop, event_count); + + __itt_task_begin(io_tracing_domain, 
__itt_null, __itt_null, tracing_event_loop_events); for (int i = 0; i < event_count; ++i) { struct epoll_event_data *event_data = (struct epoll_event_data *)events[i].data.ptr; @@ -636,9 +648,12 @@ static void aws_event_loop_thread(void *args) { "id=%p: activity on fd %d, invoking handler.", (void *)event_loop, event_data->handle->data.fd); + __itt_task_begin(io_tracing_domain, __itt_null, __itt_null, tracing_event_loop_event); event_data->on_event(event_loop, event_data->handle, event_mask, event_data->user_data); + __itt_task_end(io_tracing_domain); } } + __itt_task_end(io_tracing_domain); /* run scheduled tasks */ s_process_task_pre_queue(event_loop); @@ -647,7 +662,9 @@ static void aws_event_loop_thread(void *args) { event_loop->clock(&now_ns); /* if clock fails, now_ns will be 0 and tasks scheduled for a specific time will not be run. That's ok, we'll handle them next time around. */ AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: running scheduled tasks.", (void *)event_loop); + __itt_task_begin(io_tracing_domain, __itt_null, __itt_null, tracing_event_loop_run_tasks); aws_task_scheduler_run_all(&epoll_loop->scheduler, now_ns); + __itt_task_end(io_tracing_domain); /* set timeout for next epoll_wait() call. * if clock fails, or scheduler has no tasks, use default timeout */ diff --git a/contrib/restricted/aws/aws-c-io/source/pem.c b/contrib/restricted/aws/aws-c-io/source/pem.c new file mode 100644 index 00000000000..154a32cd9ad --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/source/pem.c @@ -0,0 +1,436 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/common/encoding.h> +#include <aws/common/string.h> +#include <aws/io/pem.h> +#include <aws/io/private/pem_utils.h> + +#include <aws/io/logging.h> + +enum aws_pem_parse_state { + BEGIN, + ON_DATA, + END, +}; + +static const struct aws_byte_cursor begin_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----BEGIN"); +static const struct aws_byte_cursor end_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----END"); +static const struct aws_byte_cursor dashes = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----"); + +int aws_sanitize_pem(struct aws_byte_buf *pem, struct aws_allocator *allocator) { + if (!pem->len) { + /* reject files with no PEM data */ + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + struct aws_byte_buf clean_pem_buf; + if (aws_byte_buf_init(&clean_pem_buf, allocator, pem->len)) { + return AWS_OP_ERR; + } + struct aws_byte_cursor pem_cursor = aws_byte_cursor_from_buf(pem); + enum aws_pem_parse_state state = BEGIN; + + for (size_t i = 0; i < pem_cursor.len; i++) { + /* parse through the pem once */ + char current = *(pem_cursor.ptr + i); + switch (state) { + case BEGIN: + if (current == '-') { + struct aws_byte_cursor compare_cursor = pem_cursor; + compare_cursor.len = begin_header.len; + compare_cursor.ptr += i; + if (aws_byte_cursor_eq(&compare_cursor, &begin_header)) { + state = ON_DATA; + i--; + } + } + break; + case ON_DATA: + /* start copying everything */ + if (current == '-') { + struct aws_byte_cursor compare_cursor = pem_cursor; + compare_cursor.len = end_header.len; + compare_cursor.ptr += i; + if (aws_byte_cursor_eq(&compare_cursor, &end_header)) { + /* Copy the end header string and start to search for the end part of a pem */ + state = END; + aws_byte_buf_append(&clean_pem_buf, &end_header); + i += (end_header.len - 1); + break; + } + } + aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)current); + break; + case END: + if (current == '-') { + struct aws_byte_cursor compare_cursor = pem_cursor; + 
compare_cursor.len = dashes.len; + compare_cursor.ptr += i; + if (aws_byte_cursor_eq(&compare_cursor, &dashes)) { + /* End part of a pem, copy the last 5 dashes and a new line, then ignore everything before next + * begin header */ + state = BEGIN; + aws_byte_buf_append(&clean_pem_buf, &dashes); + i += (dashes.len - 1); + aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)'\n'); + break; + } + } + aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)current); + break; + default: + break; + } + } + + if (clean_pem_buf.len == 0) { + /* No valid data remains after sanitization. File might have been the wrong format */ + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + goto error; + } + + struct aws_byte_cursor clean_pem_cursor = aws_byte_cursor_from_buf(&clean_pem_buf); + aws_byte_buf_reset(pem, true); + aws_byte_buf_append_dynamic(pem, &clean_pem_cursor); + aws_byte_buf_clean_up(&clean_pem_buf); + return AWS_OP_SUCCESS; + +error: + aws_byte_buf_clean_up(&clean_pem_buf); + return AWS_OP_ERR; +} + +/* + * Possible PEM object types. openssl/pem.h used as a source of truth for + * possible types. 
+ */ +static struct aws_byte_cursor s_pem_type_x509_old_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X509 CERTIFICATE"); +static struct aws_byte_cursor s_pem_type_x509_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CERTIFICATE"); +static struct aws_byte_cursor s_pem_type_x509_trusted_cur = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("TRUSTED CERTIFICATE"); +static struct aws_byte_cursor s_pem_type_x509_req_old_cur = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("NEW CERTIFICATE REQUEST"); +static struct aws_byte_cursor s_pem_type_x509_req_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CERTIFICATE REQUEST"); +static struct aws_byte_cursor s_pem_type_x509_crl_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X509 CRL"); +static struct aws_byte_cursor s_pem_type_evp_pkey_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ANY PRIVATE KEY"); +static struct aws_byte_cursor s_pem_type_public_pkcs8_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PUBLIC KEY"); +static struct aws_byte_cursor s_pem_type_private_rsa_pkcs1_cur = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RSA PRIVATE KEY"); +static struct aws_byte_cursor s_pem_type_public_rsa_pkcs1_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RSA PUBLIC KEY"); +static struct aws_byte_cursor s_pem_type_private_dsa_pkcs1_cur = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RSA PRIVATE KEY"); +static struct aws_byte_cursor s_pem_type_public_dsa_pkcs1_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RSA PUBLIC KEY"); +static struct aws_byte_cursor s_pem_type_pkcs7_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PKCS7"); +static struct aws_byte_cursor s_pem_type_pkcs7_signed_data_cur = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PKCS #7 SIGNED DATA"); +static struct aws_byte_cursor s_pem_type_private_pkcs8_encrypted_cur = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ENCRYPTED PRIVATE KEY"); +static struct aws_byte_cursor s_pem_type_private_pkcs8_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PRIVATE KEY"); +static struct aws_byte_cursor s_pem_type_dh_parameters_cur = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DH PARAMETERS"); +static struct aws_byte_cursor s_pem_type_dh_parameters_x942_cur = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X9.42 DH PARAMETERS"); +static struct aws_byte_cursor s_pem_type_ssl_session_parameters_cur = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SSL SESSION PARAMETERS"); +static struct aws_byte_cursor s_pem_type_dsa_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DSA PARAMETERS"); +static struct aws_byte_cursor s_pem_type_ecdsa_public_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ECDSA PUBLIC KEY"); +static struct aws_byte_cursor s_pem_type_ec_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("EC PARAMETERS"); +static struct aws_byte_cursor s_pem_type_ec_private_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("EC PRIVATE KEY"); +static struct aws_byte_cursor s_pem_type_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PARAMETERS"); +static struct aws_byte_cursor s_pem_type_cms_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CMS"); +static struct aws_byte_cursor s_pem_type_sm2_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SM2 PARAMETERS"); + +void aws_pem_objects_clean_up(struct aws_array_list *pem_objects) { + for (size_t i = 0; i < aws_array_list_length(pem_objects); ++i) { + struct aws_pem_object *pem_obj_ptr = NULL; + aws_array_list_get_at_ptr(pem_objects, (void **)&pem_obj_ptr, i); + + if (pem_obj_ptr != NULL) { + aws_byte_buf_clean_up_secure(&pem_obj_ptr->data); + aws_string_destroy(pem_obj_ptr->type_string); + } + } + + aws_array_list_clear(pem_objects); + aws_array_list_clean_up(pem_objects); +} + +enum aws_pem_object_type s_map_type_cur_to_type(struct aws_byte_cursor type_cur) { + /* + * Putting all those in a hash table might be a bit faster depending on + * hashing function cost, but it complicates code considerably for a + * potential small gain. PEM parsing is already slow due to multiple + * allocations and should not be used in perf critical places. 
+ * So choosing dumb and easy approach over something more complicated and we + * can reevaluate decision in the future. + */ + if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_old_cur)) { + return AWS_PEM_TYPE_X509_OLD; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_cur)) { + return AWS_PEM_TYPE_X509; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_trusted_cur)) { + return AWS_PEM_TYPE_X509_TRUSTED; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_req_old_cur)) { + return AWS_PEM_TYPE_X509_REQ_OLD; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_req_cur)) { + return AWS_PEM_TYPE_X509_REQ; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_crl_cur)) { + return AWS_PEM_TYPE_X509_CRL; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_evp_pkey_cur)) { + return AWS_PEM_TYPE_EVP_PKEY; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_public_pkcs8_cur)) { + return AWS_PEM_TYPE_PUBLIC_PKCS8; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_private_rsa_pkcs1_cur)) { + return AWS_PEM_TYPE_PRIVATE_RSA_PKCS1; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_public_rsa_pkcs1_cur)) { + return AWS_PEM_TYPE_PUBLIC_RSA_PKCS1; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_private_dsa_pkcs1_cur)) { + return AWS_PEM_TYPE_PRIVATE_DSA_PKCS1; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_public_dsa_pkcs1_cur)) { + return AWS_PEM_TYPE_PUBLIC_DSA_PKCS1; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_pkcs7_cur)) { + return AWS_PEM_TYPE_PKCS7; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_pkcs7_signed_data_cur)) { + return AWS_PEM_TYPE_PKCS7_SIGNED_DATA; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_private_pkcs8_encrypted_cur)) { + return AWS_PEM_TYPE_PRIVATE_PKCS8_ENCRYPTED; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_private_pkcs8_cur)) { + return AWS_PEM_TYPE_PRIVATE_PKCS8; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_dh_parameters_cur)) { + 
return AWS_PEM_TYPE_DH_PARAMETERS; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_dh_parameters_x942_cur)) { + return AWS_PEM_TYPE_DH_PARAMETERS_X942; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_ssl_session_parameters_cur)) { + return AWS_PEM_TYPE_SSL_SESSION_PARAMETERS; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_dsa_parameters_cur)) { + return AWS_PEM_TYPE_DSA_PARAMETERS; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_ecdsa_public_cur)) { + return AWS_PEM_TYPE_ECDSA_PUBLIC; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_ec_parameters_cur)) { + return AWS_PEM_TYPE_EC_PARAMETERS; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_ec_private_cur)) { + return AWS_PEM_TYPE_EC_PRIVATE; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_parameters_cur)) { + return AWS_PEM_TYPE_PARAMETERS; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_cms_cur)) { + return AWS_PEM_TYPE_CMS; + } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_sm2_parameters_cur)) { + return AWS_PEM_TYPE_SM2_PARAMETERS; + } + + return AWS_PEM_TYPE_UNKNOWN; +} + +static struct aws_byte_cursor s_begin_header_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----BEGIN"); +static struct aws_byte_cursor s_end_header_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----END"); +static struct aws_byte_cursor s_delim_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----"); + +int s_extract_header_type_cur(struct aws_byte_cursor cur, struct aws_byte_cursor *out) { + if (!aws_byte_cursor_starts_with(&cur, &s_begin_header_cur)) { + AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Invalid PEM buffer: invalid begin token"); + return aws_raise_error(AWS_ERROR_PEM_MALFORMED); + } + + aws_byte_cursor_advance(&cur, s_begin_header_cur.len); + aws_byte_cursor_advance(&cur, 1); // space after begin + + struct aws_byte_cursor type_cur = aws_byte_cursor_advance(&cur, cur.len - s_delim_cur.len); + + if (!aws_byte_cursor_eq(&cur, &s_delim_cur)) { + AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Invalid PEM 
buffer: invalid end token"); + return aws_raise_error(AWS_ERROR_PEM_MALFORMED); + } + + *out = type_cur; + return AWS_OP_SUCCESS; +} + +static int s_convert_pem_to_raw_base64( + struct aws_allocator *allocator, + struct aws_byte_cursor pem, + struct aws_array_list *pem_objects) { + + struct aws_array_list split_buffers; + if (aws_array_list_init_dynamic(&split_buffers, allocator, 16, sizeof(struct aws_byte_cursor))) { + return AWS_OP_ERR; + } + + if (aws_byte_cursor_split_on_char(&pem, '\n', &split_buffers)) { + aws_array_list_clean_up(&split_buffers); + AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Invalid PEM buffer: failed to split on newline"); + return aws_raise_error(AWS_ERROR_PEM_MALFORMED); + } + + enum aws_pem_parse_state state = BEGIN; + bool on_length_calc = true; + size_t current_obj_len = 0; + size_t current_obj_start_index = 0; + struct aws_byte_buf current_obj_buf; + AWS_ZERO_STRUCT(current_obj_buf); + struct aws_byte_cursor current_obj_type_cur; + AWS_ZERO_STRUCT(current_obj_type_cur); + enum aws_pem_object_type current_obj_type = AWS_PEM_TYPE_UNKNOWN; + + size_t split_count = aws_array_list_length(&split_buffers); + size_t i = 0; + + while (i < split_count) { + struct aws_byte_cursor *line_cur_ptr = NULL; + int error = aws_array_list_get_at_ptr(&split_buffers, (void **)&line_cur_ptr, i); + /* should never fail as we control array size and how we index into list */ + AWS_FATAL_ASSERT(error == AWS_OP_SUCCESS); + + /* Burn off the padding in the buffer first. + * Worst case we'll only have to do this once per line in the buffer. 
*/ + *line_cur_ptr = aws_byte_cursor_left_trim_pred(line_cur_ptr, aws_isspace); + + /* And make sure remove any space from right side */ + *line_cur_ptr = aws_byte_cursor_right_trim_pred(line_cur_ptr, aws_isspace); + + switch (state) { + case BEGIN: + if (aws_byte_cursor_starts_with(line_cur_ptr, &s_begin_header_cur)) { + if (s_extract_header_type_cur(*line_cur_ptr, ¤t_obj_type_cur)) { + goto on_end_of_loop; + } + current_obj_type = s_map_type_cur_to_type(current_obj_type_cur); + current_obj_start_index = i + 1; + state = ON_DATA; + } + ++i; + break; + /* this loops through the lines containing data twice. First to figure out the length, a second + * time to actually copy the data. */ + case ON_DATA: + /* Found end tag. */ + if (aws_byte_cursor_starts_with(line_cur_ptr, &s_end_header_cur)) { + if (on_length_calc) { + on_length_calc = false; + state = ON_DATA; + i = current_obj_start_index; + aws_byte_buf_init(¤t_obj_buf, allocator, current_obj_len); + + } else { + struct aws_pem_object pem_object = { + .data = current_obj_buf, + .type_string = aws_string_new_from_cursor(allocator, ¤t_obj_type_cur), + .type = current_obj_type, + }; + + if (aws_array_list_push_back(pem_objects, &pem_object)) { + goto on_end_of_loop; + } + state = BEGIN; + on_length_calc = true; + current_obj_len = 0; + ++i; + AWS_ZERO_STRUCT(current_obj_buf); + AWS_ZERO_STRUCT(current_obj_type_cur); + current_obj_type = AWS_PEM_TYPE_UNKNOWN; + } + /* actually on a line with data in it. */ + } else { + if (on_length_calc) { + current_obj_len += line_cur_ptr->len; + } else { + if (aws_byte_buf_append(¤t_obj_buf, line_cur_ptr)) { + goto on_end_of_loop; + } + } + ++i; + } + break; + default: + AWS_FATAL_ASSERT(false); + } + } + +/* + * Note: this function only hard error if nothing can be parsed out of file. + * Otherwise it succeeds and returns whatever was parsed successfully. 
+ */ +on_end_of_loop: + aws_array_list_clean_up(&split_buffers); + aws_byte_buf_clean_up_secure(¤t_obj_buf); + + if (state == BEGIN && aws_array_list_length(pem_objects) > 0) { + return AWS_OP_SUCCESS; + } + + AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Invalid PEM buffer."); + aws_pem_objects_clean_up(pem_objects); + return aws_raise_error(AWS_ERROR_PEM_MALFORMED); +} + +int aws_pem_objects_init_from_file_contents( + struct aws_array_list *pem_objects, + struct aws_allocator *allocator, + struct aws_byte_cursor pem_cursor) { + AWS_PRECONDITION(allocator); + AWS_PRECONDITION(pem_objects != NULL); + + /* Init empty array list, ideally, the PEM should only has one key included. */ + if (aws_array_list_init_dynamic(pem_objects, allocator, 1, sizeof(struct aws_pem_object))) { + return AWS_OP_ERR; + } + + if (s_convert_pem_to_raw_base64(allocator, pem_cursor, pem_objects)) { + goto on_error; + } + + for (size_t i = 0; i < aws_array_list_length(pem_objects); ++i) { + struct aws_pem_object *pem_obj_ptr = NULL; + aws_array_list_get_at_ptr(pem_objects, (void **)&pem_obj_ptr, i); + struct aws_byte_cursor byte_cur = aws_byte_cursor_from_buf(&pem_obj_ptr->data); + + size_t decoded_len = 0; + if (aws_base64_compute_decoded_len(&byte_cur, &decoded_len)) { + AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Failed to get length for decoded base64 pem object."); + aws_raise_error(AWS_ERROR_PEM_MALFORMED); + goto on_error; + } + + struct aws_byte_buf decoded_buffer; + aws_byte_buf_init(&decoded_buffer, allocator, decoded_len); + + if (aws_base64_decode(&byte_cur, &decoded_buffer)) { + AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Failed to base 64 decode pem object."); + aws_raise_error(AWS_ERROR_PEM_MALFORMED); + aws_byte_buf_clean_up_secure(&decoded_buffer); + goto on_error; + } + + aws_byte_buf_clean_up_secure(&pem_obj_ptr->data); + pem_obj_ptr->data = decoded_buffer; + } + + return AWS_OP_SUCCESS; + +on_error: + aws_pem_objects_clean_up(pem_objects); + return AWS_OP_ERR; +} + +int aws_pem_objects_init_from_file_path( + 
struct aws_array_list *pem_objects, + struct aws_allocator *allocator, + const char *filename) { + + struct aws_byte_buf raw_file_buffer; + if (aws_byte_buf_init_from_file(&raw_file_buffer, allocator, filename)) { + AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Failed to read file %s.", filename); + return AWS_OP_ERR; + } + AWS_ASSERT(raw_file_buffer.buffer); + + struct aws_byte_cursor file_cursor = aws_byte_cursor_from_buf(&raw_file_buffer); + if (aws_pem_objects_init_from_file_contents(pem_objects, allocator, file_cursor)) { + aws_byte_buf_clean_up_secure(&raw_file_buffer); + AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Failed to decode PEM file %s.", filename); + return AWS_OP_ERR; + } + + aws_byte_buf_clean_up_secure(&raw_file_buffer); + + return AWS_OP_SUCCESS; +} diff --git a/contrib/restricted/aws/aws-c-io/source/pem_utils.c b/contrib/restricted/aws/aws-c-io/source/pem_utils.c deleted file mode 100644 index c3843ffd4ad..00000000000 --- a/contrib/restricted/aws/aws-c-io/source/pem_utils.c +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. 
- */ -#include <aws/common/string.h> -#include <aws/io/private/pem_utils.h> - -enum aws_pem_util_state { - BEGIN, - ON_DATA, - END, -}; - -static const struct aws_byte_cursor begin_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----BEGIN"); -static const struct aws_byte_cursor end_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----END"); -static const struct aws_byte_cursor dashes = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----"); - -int aws_sanitize_pem(struct aws_byte_buf *pem, struct aws_allocator *allocator) { - if (!pem->len) { - /* reject files with no PEM data */ - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - } - struct aws_byte_buf clean_pem_buf; - if (aws_byte_buf_init(&clean_pem_buf, allocator, pem->len)) { - return AWS_OP_ERR; - } - struct aws_byte_cursor pem_cursor = aws_byte_cursor_from_buf(pem); - int state = BEGIN; - - for (size_t i = 0; i < pem_cursor.len; i++) { - /* parse through the pem once */ - char current = *(pem_cursor.ptr + i); - switch (state) { - case BEGIN: - if (current == '-') { - struct aws_byte_cursor compare_cursor = pem_cursor; - compare_cursor.len = begin_header.len; - compare_cursor.ptr += i; - if (aws_byte_cursor_eq(&compare_cursor, &begin_header)) { - state = ON_DATA; - i--; - } - } - break; - case ON_DATA: - /* start copying everything */ - if (current == '-') { - struct aws_byte_cursor compare_cursor = pem_cursor; - compare_cursor.len = end_header.len; - compare_cursor.ptr += i; - if (aws_byte_cursor_eq(&compare_cursor, &end_header)) { - /* Copy the end header string and start to search for the end part of a pem */ - state = END; - aws_byte_buf_append(&clean_pem_buf, &end_header); - i += (end_header.len - 1); - break; - } - } - aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)current); - break; - case END: - if (current == '-') { - struct aws_byte_cursor compare_cursor = pem_cursor; - compare_cursor.len = dashes.len; - compare_cursor.ptr += i; - if (aws_byte_cursor_eq(&compare_cursor, &dashes)) { - /* End 
part of a pem, copy the last 5 dashes and a new line, then ignore everything before next - * begin header */ - state = BEGIN; - aws_byte_buf_append(&clean_pem_buf, &dashes); - i += (dashes.len - 1); - aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)'\n'); - break; - } - } - aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)current); - break; - default: - break; - } - } - - if (clean_pem_buf.len == 0) { - /* No valid data remains after sanitization. File might have been the wrong format */ - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - goto error; - } - - struct aws_byte_cursor clean_pem_cursor = aws_byte_cursor_from_buf(&clean_pem_buf); - aws_byte_buf_reset(pem, true); - aws_byte_buf_append_dynamic(pem, &clean_pem_cursor); - aws_byte_buf_clean_up(&clean_pem_buf); - return AWS_OP_SUCCESS; - -error: - aws_byte_buf_clean_up(&clean_pem_buf); - return AWS_OP_ERR; -} diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11.h b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11.h index 0d78dd71136..dd3d3549f68 100644 --- a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11.h +++ b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11.h @@ -1,265 +1,1766 @@ -/* Copyright (c) OASIS Open 2016. All Rights Reserved./ - * /Distributed under the terms of the OASIS IPR Policy, - * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY - * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. - */ - -/* Latest version of the specification: - * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html +/* This file is in the Public Domain. 
*/ +/* This file is based on: https://github.com/latchset/pkcs11-headers/blob/main/public-domain/2.40/pkcs11.h */ +/* This file has been modified from its original version by Amazon: + * - removed final semicolon from #define ULONGDEF, to avoid "extra-semi" warning + * - removed final semicolon from #define STRUCTDEF, to avoid "extra-semi" warning */ -#ifndef _PKCS11_H_ -#define _PKCS11_H_ 1 +#ifndef _PD_PKCS11_ +#define _PD_PKCS11_ + +#define CRYPTOKI_VERSION_MAJOR 2 +#define CRYPTOKI_VERSION_MINOR 40 +#define CRYPTOKI_VERSION_AMENDMENT 0 + +/* Basic types */ +typedef unsigned char CK_BBOOL; +typedef unsigned char CK_BYTE; +typedef unsigned char CK_CHAR; +typedef unsigned char CK_UTF8CHAR; +typedef unsigned long int CK_ULONG; + +typedef CK_BBOOL * CK_BBOOL_PTR; +typedef CK_BYTE * CK_BYTE_PTR; +typedef CK_CHAR * CK_CHAR_PTR; +typedef CK_UTF8CHAR * CK_UTF8CHAR_PTR; +typedef CK_ULONG * CK_ULONG_PTR; + +/* Basic defines */ +#define NULL_PTR ((void *)0) +typedef void * CK_VOID_PTR; +typedef void ** CK_VOID_PTR_PTR; + +#define CK_EFFECTIVELY_INFINITE 0UL +#define CK_UNAVAILABLE_INFORMATION ~0UL +#define CK_INVALID_HANDLE 0UL +#define CK_TRUE 1 +#define CK_FALSE 0 + +/* CK_ types in alphabetical order */ +#define ULONGDEF(__name__) \ +typedef CK_ULONG __name__; \ +typedef __name__ * __name__ ## _PTR + +ULONGDEF(CK_ATTRIBUTE_TYPE); +ULONGDEF(CK_CERTIFICATE_CATEGORY); +ULONGDEF(CK_CERTIFICATE_TYPE); +ULONGDEF(CK_EC_KDF_TYPE); +ULONGDEF(CK_EXTRACT_PARAMS); +ULONGDEF(CK_FLAGS); +ULONGDEF(CK_HW_FEATURE_TYPE); +ULONGDEF(CK_JAVA_MIDP_SECURITY_DOMAIN); +ULONGDEF(CK_KEY_TYPE); +ULONGDEF(CK_MAC_GENERAL_PARAMS); +ULONGDEF(CK_MECHANISM_TYPE); +ULONGDEF(CK_NOTIFICATION); +ULONGDEF(CK_OBJECT_CLASS); +ULONGDEF(CK_OBJECT_HANDLE); +ULONGDEF(CK_OTP_PARAM_TYPE); +ULONGDEF(CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE); +ULONGDEF(CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE); +ULONGDEF(CK_RC2_PARAMS); +ULONGDEF(CK_RSA_PKCS_MGF_TYPE); +ULONGDEF(CK_RSA_PKCS_OAEP_SOURCE_TYPE); +ULONGDEF(CK_RV); 
+ULONGDEF(CK_SESSION_HANDLE); +ULONGDEF(CK_SLOT_ID); +ULONGDEF(CK_STATE); +ULONGDEF(CK_USER_TYPE); +ULONGDEF(CK_X9_42_DH_KDF_TYPE); + +/* domain specific values and constants */ + +/* CK (certificate) */ +#define CK_CERTIFICATE_CATEGORY_UNSPECIFIED 0UL +#define CK_CERTIFICATE_CATEGORY_TOKEN_USER 1UL +#define CK_CERTIFICATE_CATEGORY_AUTHORITY 2UL +#define CK_CERTIFICATE_CATEGORY_OTHER_ENTITY 3UL + +/* CK (OTP) */ +#define CK_OTP_VALUE 0UL +#define CK_OTP_PIN 1UL +#define CK_OTP_CHALLENGE 2UL +#define CK_OTP_TIME 3UL +#define CK_OTP_COUNTER 4UL +#define CK_OTP_FLAGS 5UL +#define CK_OTP_OUTPUT_LENGTH 6UL +#define CK_OTP_OUTPUT_FORMAT 7UL -#ifdef __cplusplus -extern "C" { +/* CK (OTP format) */ +#define CK_OTP_FORMAT_DECIMAL 0UL +#define CK_OTP_FORMAT_HEXADECIMAL 1UL +#define CK_OTP_FORMAT_ALPHANUMERIC 2UL +#define CK_OTP_FORMAT_BINARY 3UL + +/* CK (OTP requirement) */ +#define CK_OTP_PARAM_IGNORED 0UL +#define CK_OTP_PARAM_OPTIONAL 1UL +#define CK_OTP_PARAM_MANDATORY 2UL + +/* CK (security) */ +#define CK_SECURITY_DOMAIN_UNSPECIFIED 0UL +#define CK_SECURITY_DOMAIN_MANUFACTURER 1UL +#define CK_SECURITY_DOMAIN_OPERATOR 2UL +#define CK_SECURITY_DOMAIN_THIRD_PARTY 3UL + +/* CK (SP800 DKM) */ +#define CK_SP800_108_DKM_LENGTH_SUM_OF_KEYS 0x00000001UL +#define CK_SP800_108_DKM_LENGTH_SUM_OF_SEGMENTS 0x00000002UL + +/* CKA */ +#define CKA_CLASS 0x00000000UL +#define CKA_TOKEN 0x00000001UL +#define CKA_PRIVATE 0x00000002UL +#define CKA_LABEL 0x00000003UL +#define CKA_APPLICATION 0x00000010UL +#define CKA_VALUE 0x00000011UL +#define CKA_OBJECT_ID 0x00000012UL +#define CKA_CERTIFICATE_TYPE 0x00000080UL +#define CKA_ISSUER 0x00000081UL +#define CKA_SERIAL_NUMBER 0x00000082UL +#define CKA_AC_ISSUER 0x00000083UL +#define CKA_OWNER 0x00000084UL +#define CKA_ATTR_TYPES 0x00000085UL +#define CKA_TRUSTED 0x00000086UL +#define CKA_CERTIFICATE_CATEGORY 0x00000087UL +#define CKA_JAVA_MIDP_SECURITY_DOMAIN 0x00000088UL +#define CKA_URL 0x00000089UL +#define CKA_HASH_OF_SUBJECT_PUBLIC_KEY 
0x0000008AUL +#define CKA_HASH_OF_ISSUER_PUBLIC_KEY 0x0000008BUL +#define CKA_NAME_HASH_ALGORITHM 0x0000008CUL +#define CKA_CHECK_VALUE 0x00000090UL +#define CKA_KEY_TYPE 0x00000100UL +#define CKA_SUBJECT 0x00000101UL +#define CKA_ID 0x00000102UL +#define CKA_SENSITIVE 0x00000103UL +#define CKA_ENCRYPT 0x00000104UL +#define CKA_DECRYPT 0x00000105UL +#define CKA_WRAP 0x00000106UL +#define CKA_UNWRAP 0x00000107UL +#define CKA_SIGN 0x00000108UL +#define CKA_SIGN_RECOVER 0x00000109UL +#define CKA_VERIFY 0x0000010AUL +#define CKA_VERIFY_RECOVER 0x0000010BUL +#define CKA_DERIVE 0x0000010CUL +#define CKA_START_DATE 0x00000110UL +#define CKA_END_DATE 0x00000111UL +#define CKA_MODULUS 0x00000120UL +#define CKA_MODULUS_BITS 0x00000121UL +#define CKA_PUBLIC_EXPONENT 0x00000122UL +#define CKA_PRIVATE_EXPONENT 0x00000123UL +#define CKA_PRIME_1 0x00000124UL +#define CKA_PRIME_2 0x00000125UL +#define CKA_EXPONENT_1 0x00000126UL +#define CKA_EXPONENT_2 0x00000127UL +#define CKA_COEFFICIENT 0x00000128UL +#define CKA_PUBLIC_KEY_INFO 0x00000129UL +#define CKA_PRIME 0x00000130UL +#define CKA_SUBPRIME 0x00000131UL +#define CKA_BASE 0x00000132UL +#define CKA_PRIME_BITS 0x00000133UL +#define CKA_SUBPRIME_BITS 0x00000134UL +#define CKA_SUB_PRIME_BITS 0x00000134UL +#define CKA_VALUE_BITS 0x00000160UL +#define CKA_VALUE_LEN 0x00000161UL +#define CKA_EXTRACTABLE 0x00000162UL +#define CKA_LOCAL 0x00000163UL +#define CKA_NEVER_EXTRACTABLE 0x00000164UL +#define CKA_ALWAYS_SENSITIVE 0x00000165UL +#define CKA_KEY_GEN_MECHANISM 0x00000166UL +#define CKA_MODIFIABLE 0x00000170UL +#define CKA_COPYABLE 0x00000171UL +#define CKA_DESTROYABLE 0x00000172UL +#define CKA_EC_PARAMS 0x00000180UL +#define CKA_EC_POINT 0x00000181UL +#define CKA_ALWAYS_AUTHENTICATE 0x00000202UL +#define CKA_WRAP_WITH_TRUSTED 0x00000210UL +#define CKA_OTP_FORMAT 0x00000220UL +#define CKA_OTP_LENGTH 0x00000221UL +#define CKA_OTP_TIME_INTERVAL 0x00000222UL +#define CKA_OTP_USER_FRIENDLY_MODE 0x00000223UL +#define 
CKA_OTP_CHALLENGE_REQUIREMENT 0x00000224UL +#define CKA_OTP_TIME_REQUIREMENT 0x00000225UL +#define CKA_OTP_COUNTER_REQUIREMENT 0x00000226UL +#define CKA_OTP_PIN_REQUIREMENT 0x00000227UL +#define CKA_OTP_COUNTER 0x0000022EUL +#define CKA_OTP_TIME 0x0000022FUL +#define CKA_OTP_USER_IDENTIFIER 0x0000022AUL +#define CKA_OTP_SERVICE_IDENTIFIER 0x0000022BUL +#define CKA_OTP_SERVICE_LOGO 0x0000022CUL +#define CKA_OTP_SERVICE_LOGO_TYPE 0x0000022DUL +#define CKA_GOSTR3410_PARAMS 0x00000250UL +#define CKA_GOSTR3411_PARAMS 0x00000251UL +#define CKA_GOST28147_PARAMS 0x00000252UL +#define CKA_HW_FEATURE_TYPE 0x00000300UL +#define CKA_RESET_ON_INIT 0x00000301UL +#define CKA_HAS_RESET 0x00000302UL +#define CKA_PIXEL_X 0x00000400UL +#define CKA_PIXEL_Y 0x00000401UL +#define CKA_RESOLUTION 0x00000402UL +#define CKA_CHAR_ROWS 0x00000403UL +#define CKA_CHAR_COLUMNS 0x00000404UL +#define CKA_COLOR 0x00000405UL +#define CKA_BITS_PER_PIXEL 0x00000406UL +#define CKA_CHAR_SETS 0x00000480UL +#define CKA_ENCODING_METHODS 0x00000481UL +#define CKA_MIME_TYPES 0x00000482UL +#define CKA_MECHANISM_TYPE 0x00000500UL +#define CKA_REQUIRED_CMS_ATTRIBUTES 0x00000501UL +#define CKA_DEFAULT_CMS_ATTRIBUTES 0x00000502UL +#define CKA_SUPPORTED_CMS_ATTRIBUTES 0x00000503UL +#define CKA_VENDOR_DEFINED 0x80000000UL +/* Array attributes */ +#define CKA_WRAP_TEMPLATE 0x40000211UL +#define CKA_UNWRAP_TEMPLATE 0x40000212UL +#define CKA_DERIVE_TEMPLATE 0x40000213UL +#define CKA_ALLOWED_MECHANISMS 0x40000600UL +/* Deprecated */ +#ifdef PKCS11_DEPRECATED +#define CKA_ECDSA_PARAMS 0x00000180UL +#define CKA_SECONDARY_AUTH 0x00000200UL +#define CKA_AUTH_PIN_FLAGS 0x00000201UL #endif -/* Before including this file (pkcs11.h) (or pkcs11t.h by - * itself), 5 platform-specific macros must be defined. These - * macros are described below, and typical definitions for them - * are also given. 
Be advised that these definitions can depend - * on both the platform and the compiler used (and possibly also - * on whether a Cryptoki library is linked statically or - * dynamically). - * - * In addition to defining these 5 macros, the packing convention - * for Cryptoki structures should be set. The Cryptoki - * convention on packing is that structures should be 1-byte - * aligned. - * - * If you're using Microsoft Developer Studio 5.0 to produce - * Win32 stuff, this might be done by using the following - * preprocessor directive before including pkcs11.h or pkcs11t.h: - * - * #pragma pack(push, cryptoki, 1) - * - * and using the following preprocessor directive after including - * pkcs11.h or pkcs11t.h: - * - * #pragma pack(pop, cryptoki) - * - * If you're using an earlier version of Microsoft Developer - * Studio to produce Win16 stuff, this might be done by using - * the following preprocessor directive before including - * pkcs11.h or pkcs11t.h: - * - * #pragma pack(1) - * - * In a UNIX environment, you're on your own for this. You might - * not need to do (or be able to do!) anything. - * - * - * Now for the macros: - * - * - * 1. CK_PTR: The indirection string for making a pointer to an - * object. It can be used like this: - * - * typedef CK_BYTE CK_PTR CK_BYTE_PTR; - * - * If you're using Microsoft Developer Studio 5.0 to produce - * Win32 stuff, it might be defined by: - * - * #define CK_PTR * - * - * If you're using an earlier version of Microsoft Developer - * Studio to produce Win16 stuff, it might be defined by: - * - * #define CK_PTR far * - * - * In a typical UNIX environment, it might be defined by: - * - * #define CK_PTR * - * - * - * 2. CK_DECLARE_FUNCTION(returnType, name): A macro which makes - * an importable Cryptoki library function declaration out of a - * return type and a function name. 
It should be used in the - * following fashion: - * - * extern CK_DECLARE_FUNCTION(CK_RV, C_Initialize)( - * CK_VOID_PTR pReserved - * ); - * - * If you're using Microsoft Developer Studio 5.0 to declare a - * function in a Win32 Cryptoki .dll, it might be defined by: - * - * #define CK_DECLARE_FUNCTION(returnType, name) \ - * returnType __declspec(dllimport) name - * - * If you're using an earlier version of Microsoft Developer - * Studio to declare a function in a Win16 Cryptoki .dll, it - * might be defined by: - * - * #define CK_DECLARE_FUNCTION(returnType, name) \ - * returnType __export _far _pascal name - * - * In a UNIX environment, it might be defined by: - * - * #define CK_DECLARE_FUNCTION(returnType, name) \ - * returnType name - * - * - * 3. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro - * which makes a Cryptoki API function pointer declaration or - * function pointer type declaration out of a return type and a - * function name. It should be used in the following fashion: - * - * // Define funcPtr to be a pointer to a Cryptoki API function - * // taking arguments args and returning CK_RV. - * CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtr)(args); - * - * or - * - * // Define funcPtrType to be the type of a pointer to a - * // Cryptoki API function taking arguments args and returning - * // CK_RV, and then define funcPtr to be a variable of type - * // funcPtrType. 
- * typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtrType)(args); - * funcPtrType funcPtr; - * - * If you're using Microsoft Developer Studio 5.0 to access - * functions in a Win32 Cryptoki .dll, in might be defined by: - * - * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ - * returnType __declspec(dllimport) (* name) - * - * If you're using an earlier version of Microsoft Developer - * Studio to access functions in a Win16 Cryptoki .dll, it might - * be defined by: - * - * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ - * returnType __export _far _pascal (* name) - * - * In a UNIX environment, it might be defined by: - * - * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ - * returnType (* name) - * - * - * 4. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes - * a function pointer type for an application callback out of - * a return type for the callback and a name for the callback. - * It should be used in the following fashion: - * - * CK_CALLBACK_FUNCTION(CK_RV, myCallback)(args); - * - * to declare a function pointer, myCallback, to a callback - * which takes arguments args and returns a CK_RV. It can also - * be used like this: - * - * typedef CK_CALLBACK_FUNCTION(CK_RV, myCallbackType)(args); - * myCallbackType myCallback; - * - * If you're using Microsoft Developer Studio 5.0 to do Win32 - * Cryptoki development, it might be defined by: - * - * #define CK_CALLBACK_FUNCTION(returnType, name) \ - * returnType (* name) - * - * If you're using an earlier version of Microsoft Developer - * Studio to do Win16 development, it might be defined by: - * - * #define CK_CALLBACK_FUNCTION(returnType, name) \ - * returnType _far _pascal (* name) - * - * In a UNIX environment, it might be defined by: - * - * #define CK_CALLBACK_FUNCTION(returnType, name) \ - * returnType (* name) - * - * - * 5. NULL_PTR: This macro is the value of a NULL pointer. 
- * - * In any ANSI/ISO C environment (and in many others as well), - * this should best be defined by - * - * #ifndef NULL_PTR - * #define NULL_PTR 0 - * #endif - */ +/* CKC */ +#define CKC_X_509 0x00000000UL +#define CKC_X_509_ATTR_CERT 0x00000001UL +#define CKC_WTLS 0x00000002UL +#define CKC_VENDOR_DEFINED 0x80000000UL +/* CKD */ +#define CKD_NULL 0x00000001UL +#define CKD_SHA1_KDF 0x00000002UL +#define CKD_SHA1_KDF_ASN1 0x00000003UL +#define CKD_SHA1_KDF_CONCATENATE 0x00000004UL +#define CKD_SHA224_KDF 0x00000005UL +#define CKD_SHA256_KDF 0x00000006UL +#define CKD_SHA384_KDF 0x00000007UL +#define CKD_SHA512_KDF 0x00000008UL +#define CKD_CPDIVERSIFY_KDF 0x00000009UL -/* All the various Cryptoki types and #define'd values are in the - * file pkcs11t.h. - */ -#include "pkcs11t.h" +/* CFK (array attributes) */ +#define CKF_ARRAY_ATTRIBUTE 0x40000000UL -#define __PASTE(x,y) x##y +/* CKF (capabilities) */ +#define CKF_LIBRARY_CANT_CREATE_OS_THREADS 0x00000001UL +#define CKF_OS_LOCKING_OK 0x00000002UL +/* CKF (mechanism) */ +#define CKF_HW 0x00000001UL +#define CKF_ENCRYPT 0x00000100UL +#define CKF_DECRYPT 0x00000200UL +#define CKF_DIGEST 0x00000400UL +#define CKF_SIGN 0x00000800UL +#define CKF_SIGN_RECOVER 0x00001000UL +#define CKF_VERIFY 0x00002000UL +#define CKF_VERIFY_RECOVER 0x00004000UL +#define CKF_GENERATE 0x00008000UL +#define CKF_GENERATE_KEY_PAIR 0x00010000UL +#define CKF_WRAP 0x00020000UL +#define CKF_UNWRAP 0x00040000UL +#define CKF_DERIVE 0x00080000UL +#define CKF_EC_F_P 0x00100000UL +#define CKF_EC_F_2M 0x00200000UL +#define CKF_EC_ECPARAMETERS 0x00400000UL +#define CKF_EC_NAMEDCURVE 0x00800000U +#define CKF_EC_UNCOMPRESS 0x01000000UL +#define CKF_EC_COMPRESS 0x02000000UL +#define CKF_EXTENSION 0x80000000UL -/* ============================================================== - * Define the "extern" form of all the entry points. 
- * ============================================================== - */ +/* CKF (OTP) */ +#define CKF_NEXT_OTP 0x00000001UL +#define CKF_EXCLUDE_TIME 0x00000002UL +#define CKF_EXCLUDE_COUNTER 0x00000004UL +#define CKF_EXCLUDE_CHALLENGE 0x00000008UL +#define CKF_EXCLUDE_PIN 0x00000010UL +#define CKF_USER_FRIENDLY_OTP 0x00000020UL -#define CK_NEED_ARG_LIST 1 -#define CK_PKCS11_FUNCTION_INFO(name) \ - extern CK_DECLARE_FUNCTION(CK_RV, name) +/* CKF (paramters to functions) */ +#define CKF_DONT_BLOCK 1 -/* pkcs11f.h has all the information about the Cryptoki - * function prototypes. - */ -#include "pkcs11f.h" +/* CKF (session) */ +#define CKF_RW_SESSION 0x00000002UL +#define CKF_SERIAL_SESSION 0x00000004UL -#undef CK_NEED_ARG_LIST -#undef CK_PKCS11_FUNCTION_INFO +/* CFK (slot) */ +#define CKF_TOKEN_PRESENT 0x00000001UL +#define CKF_REMOVABLE_DEVICE 0x00000002UL +#define CKF_HW_SLOT 0x00000004UL +/* CKF (token) */ +#define CKF_RNG 0x00000001UL +#define CKF_WRITE_PROTECTED 0x00000002UL +#define CKF_LOGIN_REQUIRED 0x00000004UL +#define CKF_USER_PIN_INITIALIZED 0x00000008UL +#define CKF_RESTORE_KEY_NOT_NEEDED 0x00000020UL +#define CKF_CLOCK_ON_TOKEN 0x00000040UL +#define CKF_PROTECTED_AUTHENTICATION_PATH 0x00000100UL +#define CKF_DUAL_CRYPTO_OPERATIONS 0x00000200UL +#define CKF_TOKEN_INITIALIZED 0x00000400UL +#define CKF_SECONDARY_AUTHENTICATION 0x00000800UL +#define CKF_USER_PIN_COUNT_LOW 0x00010000UL +#define CKF_USER_PIN_FINAL_TRY 0x00020000UL +#define CKF_USER_PIN_LOCKED 0x00040000UL +#define CKF_USER_PIN_TO_BE_CHANGED 0x00080000UL +#define CKF_SO_PIN_COUNT_LOW 0x00100000UL +#define CKF_SO_PIN_FINAL_TRY 0x00200000UL +#define CKF_SO_PIN_LOCKED 0x00400000UL +#define CKF_SO_PIN_TO_BE_CHANGED 0x00800000UL +#define CKF_ERROR_STATE 0x01000000UL -/* ============================================================== - * Define the typedef form of all the entry points. 
That is, for - * each Cryptoki function C_XXX, define a type CK_C_XXX which is - * a pointer to that kind of function. - * ============================================================== - */ +/* CKG (MFG) */ +#define CKG_MGF1_SHA1 0x00000001UL +#define CKG_MGF1_SHA256 0x00000002UL +#define CKG_MGF1_SHA384 0x00000003UL +#define CKG_MGF1_SHA512 0x00000004UL +#define CKG_MGF1_SHA224 0x00000005UL -#define CK_NEED_ARG_LIST 1 -#define CK_PKCS11_FUNCTION_INFO(name) \ - typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, __PASTE(CK_,name)) +/* CKH */ +#define CKH_MONOTONIC_COUNTER 0x00000001UL +#define CKH_CLOCK 0x00000002UL +#define CKH_USER_INTERFACE 0x00000003UL +#define CKH_VENDOR_DEFINED 0x80000000UL -/* pkcs11f.h has all the information about the Cryptoki - * function prototypes. - */ -#include "pkcs11f.h" +/* CKK */ +#define CKK_RSA 0x00000000UL +#define CKK_DSA 0x00000001UL +#define CKK_DH 0x00000002UL +#define CKK_EC 0x00000003UL +#define CKK_X9_42_DH 0x00000004UL +#define CKK_KEA 0x00000005UL +#define CKK_GENERIC_SECRET 0x00000010UL +#define CKK_RC2 0x00000011UL +#define CKK_RC4 0x00000012UL +#define CKK_DES 0x00000013UL +#define CKK_DES2 0x00000014UL +#define CKK_DES3 0x00000015UL +#define CKK_CAST 0x00000016UL +#define CKK_CAST3 0x00000017UL +#define CKK_CAST128 0x00000018UL +#define CKK_RC5 0x00000019UL +#define CKK_IDEA 0x0000001AUL +#define CKK_SKIPJACK 0x0000001BUL +#define CKK_BATON 0x0000001CUL +#define CKK_JUNIPER 0x0000001DUL +#define CKK_CDMF 0x0000001EUL +#define CKK_AES 0x0000001FUL +#define CKK_BLOWFISH 0x00000020UL +#define CKK_TWOFISH 0x00000021UL +#define CKK_SECURID 0x00000022UL +#define CKK_HOTP 0x00000023UL +#define CKK_ACTI 0x00000024UL +#define CKK_CAMELLIA 0x00000025UL +#define CKK_ARIA 0x00000026UL +#define CKK_MD5_HMAC 0x00000027UL +#define CKK_SHA_1_HMAC 0x00000028UL +#define CKK_RIPEMD128_HMAC 0x00000029UL +#define CKK_RIPEMD160_HMAC 0x0000002AUL +#define CKK_SHA256_HMAC 0x0000002BUL +#define CKK_SHA384_HMAC 0x0000002CUL +#define CKK_SHA512_HMAC 
0x0000002DUL +#define CKK_SHA224_HMAC 0x0000002EUL +#define CKK_SEED 0x0000002FUL +#define CKK_GOSTR3410 0x00000030UL +#define CKK_GOSTR3411 0x00000031UL +#define CKK_GOST28147 0x00000032UL +#define CKK_VENDOR_DEFINED 0x80000000UL +/* Deprecated */ +#ifdef PKCS11_DEPRECATED +#define CKK_ECDSA 0x00000003UL +#define CKK_CAST5 0x00000018UL +#endif -#undef CK_NEED_ARG_LIST -#undef CK_PKCS11_FUNCTION_INFO +/* CKM */ +#define CKM_RSA_PKCS_KEY_PAIR_GEN 0x00000000UL +#define CKM_RSA_PKCS 0x00000001UL +#define CKM_RSA_9796 0x00000002UL +#define CKM_RSA_X_509 0x00000003UL +#define CKM_MD2_RSA_PKCS 0x00000004UL +#define CKM_MD5_RSA_PKCS 0x00000005UL +#define CKM_SHA1_RSA_PKCS 0x00000006UL +#define CKM_RIPEMD128_RSA_PKCS 0x00000007UL +#define CKM_RIPEMD160_RSA_PKCS 0x00000008UL +#define CKM_RSA_PKCS_OAEP 0x00000009UL +#define CKM_RSA_X9_31_KEY_PAIR_GEN 0x0000000AUL +#define CKM_RSA_X9_31 0x0000000BUL +#define CKM_SHA1_RSA_X9_31 0x0000000CUL +#define CKM_RSA_PKCS_PSS 0x0000000DUL +#define CKM_SHA1_RSA_PKCS_PSS 0x0000000EUL +#define CKM_DSA_KEY_PAIR_GEN 0x00000010UL +#define CKM_DSA 0x00000011UL +#define CKM_DSA_SHA1 0x00000012UL +#define CKM_DSA_SHA224 0x00000013UL +#define CKM_DSA_SHA256 0x00000014UL +#define CKM_DSA_SHA384 0x00000015UL +#define CKM_DSA_SHA512 0x00000016UL +#define CKM_DH_PKCS_KEY_PAIR_GEN 0x00000020UL +#define CKM_DH_PKCS_DERIVE 0x00000021UL +#define CKM_X9_42_DH_KEY_PAIR_GEN 0x00000030UL +#define CKM_X9_42_DH_DERIVE 0x00000031UL +#define CKM_X9_42_DH_HYBRID_DERIVE 0x00000032UL +#define CKM_X9_42_MQV_DERIVE 0x00000033UL +#define CKM_SHA256_RSA_PKCS 0x00000040UL +#define CKM_SHA384_RSA_PKCS 0x00000041UL +#define CKM_SHA512_RSA_PKCS 0x00000042UL +#define CKM_SHA256_RSA_PKCS_PSS 0x00000043UL +#define CKM_SHA384_RSA_PKCS_PSS 0x00000044UL +#define CKM_SHA512_RSA_PKCS_PSS 0x00000045UL +#define CKM_SHA224_RSA_PKCS 0x00000046UL +#define CKM_SHA224_RSA_PKCS_PSS 0x00000047UL +#define CKM_SHA512_224 0x00000048UL +#define CKM_SHA512_224_HMAC 0x00000049UL +#define 
CKM_SHA512_224_HMAC_GENERAL 0x0000004AUL +#define CKM_SHA512_224_KEY_DERIVATION 0x0000004BUL +#define CKM_SHA512_256 0x0000004CUL +#define CKM_SHA512_256_HMAC 0x0000004DUL +#define CKM_SHA512_256_HMAC_GENERAL 0x0000004EUL +#define CKM_SHA512_256_KEY_DERIVATION 0x0000004FUL +#define CKM_SHA512_T 0x00000050UL +#define CKM_SHA512_T_HMAC 0x00000051UL +#define CKM_SHA512_T_HMAC_GENERAL 0x00000052UL +#define CKM_SHA512_T_KEY_DERIVATION 0x00000053UL +#define CKM_RC2_KEY_GEN 0x00000100UL +#define CKM_RC2_ECB 0x00000101UL +#define CKM_RC2_CBC 0x00000102UL +#define CKM_RC2_MAC 0x00000103UL +#define CKM_RC2_MAC_GENERAL 0x00000104UL +#define CKM_RC2_CBC_PAD 0x00000105UL +#define CKM_RC4_KEY_GEN 0x00000110UL +#define CKM_RC4 0x00000111UL +#define CKM_DES_KEY_GEN 0x00000120UL +#define CKM_DES_ECB 0x00000121UL +#define CKM_DES_CBC 0x00000122UL +#define CKM_DES_MAC 0x00000123UL +#define CKM_DES_MAC_GENERAL 0x00000124UL +#define CKM_DES_CBC_PAD 0x00000125UL +#define CKM_DES2_KEY_GEN 0x00000130UL +#define CKM_DES3_KEY_GEN 0x00000131UL +#define CKM_DES3_ECB 0x00000132UL +#define CKM_DES3_CBC 0x00000133UL +#define CKM_DES3_MAC 0x00000134UL +#define CKM_DES3_MAC_GENERAL 0x00000135UL +#define CKM_DES3_CBC_PAD 0x00000136UL +#define CKM_DES3_CMAC_GENERAL 0x00000137UL +#define CKM_DES3_CMAC 0x00000138UL +#define CKM_CDMF_KEY_GEN 0x00000140UL +#define CKM_CDMF_ECB 0x00000141UL +#define CKM_CDMF_CBC 0x00000142UL +#define CKM_CDMF_MAC 0x00000143UL +#define CKM_CDMF_MAC_GENERAL 0x00000144UL +#define CKM_CDMF_CBC_PAD 0x00000145UL +#define CKM_DES_OFB64 0x00000150UL +#define CKM_DES_OFB8 0x00000151UL +#define CKM_DES_CFB64 0x00000152UL +#define CKM_DES_CFB8 0x00000153UL +#define CKM_MD2 0x00000200UL +#define CKM_MD2_HMAC 0x00000201UL +#define CKM_MD2_HMAC_GENERAL 0x00000202UL +#define CKM_MD5 0x00000210UL +#define CKM_MD5_HMAC 0x00000211UL +#define CKM_MD5_HMAC_GENERAL 0x00000212UL +#define CKM_SHA_1 0x00000220UL +#define CKM_SHA_1_HMAC 0x00000221UL +#define CKM_SHA_1_HMAC_GENERAL 0x00000222UL 
+#define CKM_RIPEMD128 0x00000230UL +#define CKM_RIPEMD128_HMAC 0x00000231UL +#define CKM_RIPEMD128_HMAC_GENERAL 0x00000232UL +#define CKM_RIPEMD160 0x00000240UL +#define CKM_RIPEMD160_HMAC 0x00000241UL +#define CKM_RIPEMD160_HMAC_GENERAL 0x00000242UL +#define CKM_SHA256 0x00000250UL +#define CKM_SHA256_HMAC 0x00000251UL +#define CKM_SHA256_HMAC_GENERAL 0x00000252UL +#define CKM_SHA224 0x00000255UL +#define CKM_SHA224_HMAC 0x00000256UL +#define CKM_SHA224_HMAC_GENERAL 0x00000257UL +#define CKM_SHA384 0x00000260UL +#define CKM_SHA384_HMAC 0x00000261UL +#define CKM_SHA384_HMAC_GENERAL 0x00000262UL +#define CKM_SHA512 0x00000270UL +#define CKM_SHA512_HMAC 0x00000271UL +#define CKM_SHA512_HMAC_GENERAL 0x00000272UL +#define CKM_SECURID_KEY_GEN 0x00000280UL +#define CKM_SECURID 0x00000282UL +#define CKM_HOTP_KEY_GEN 0x00000290UL +#define CKM_HOTP 0x00000291UL +#define CKM_ACTI 0x000002A0UL +#define CKM_ACTI_KEY_GEN 0x000002A1UL +#define CKM_CAST_KEY_GEN 0x00000300UL +#define CKM_CAST_ECB 0x00000301UL +#define CKM_CAST_CBC 0x00000302UL +#define CKM_CAST_MAC 0x00000303UL +#define CKM_CAST_MAC_GENERAL 0x00000304UL +#define CKM_CAST_CBC_PAD 0x00000305UL +#define CKM_CAST3_KEY_GEN 0x00000310UL +#define CKM_CAST3_ECB 0x00000311UL +#define CKM_CAST3_CBC 0x00000312UL +#define CKM_CAST3_MAC 0x00000313UL +#define CKM_CAST3_MAC_GENERAL 0x00000314UL +#define CKM_CAST3_CBC_PAD 0x00000315UL +#define CKM_CAST128_KEY_GEN 0x00000320UL +#define CKM_CAST5_ECB 0x00000321UL +#define CKM_CAST128_ECB 0x00000321UL +#define CKM_CAST128_MAC 0x00000323UL +#define CKM_CAST128_CBC 0x00000322UL +#define CKM_CAST128_MAC_GENERAL 0x00000324UL +#define CKM_CAST128_CBC_PAD 0x00000325UL +#define CKM_RC5_KEY_GEN 0x00000330UL +#define CKM_RC5_ECB 0x00000331UL +#define CKM_RC5_CBC 0x00000332UL +#define CKM_RC5_MAC 0x00000333UL +#define CKM_RC5_MAC_GENERAL 0x00000334UL +#define CKM_RC5_CBC_PAD 0x00000335UL +#define CKM_IDEA_KEY_GEN 0x00000340UL +#define CKM_IDEA_ECB 0x00000341UL +#define CKM_IDEA_CBC 
0x00000342UL +#define CKM_IDEA_MAC 0x00000343UL +#define CKM_IDEA_MAC_GENERAL 0x00000344UL +#define CKM_IDEA_CBC_PAD 0x00000345UL +#define CKM_GENERIC_SECRET_KEY_GEN 0x00000350UL +#define CKM_CONCATENATE_BASE_AND_KEY 0x00000360UL +#define CKM_CONCATENATE_BASE_AND_DATA 0x00000362UL +#define CKM_CONCATENATE_DATA_AND_BASE 0x00000363UL +#define CKM_XOR_BASE_AND_DATA 0x00000364UL +#define CKM_EXTRACT_KEY_FROM_KEY 0x00000365UL +#define CKM_SSL3_PRE_MASTER_KEY_GEN 0x00000370UL +#define CKM_SSL3_MASTER_KEY_DERIVE 0x00000371UL +#define CKM_SSL3_KEY_AND_MAC_DERIVE 0x00000372UL +#define CKM_SSL3_MASTER_KEY_DERIVE_DH 0x00000373UL +#define CKM_TLS_PRE_MASTER_KEY_GEN 0x00000374UL +#define CKM_TLS_MASTER_KEY_DERIVE 0x00000375UL +#define CKM_TLS_KEY_AND_MAC_DERIVE 0x00000376UL +#define CKM_TLS_MASTER_KEY_DERIVE_DH 0x00000377UL +#define CKM_TLS_PRF 0x00000378UL +#define CKM_SSL3_MD5_MAC 0x00000380UL +#define CKM_SSL3_SHA1_MAC 0x00000381UL +#define CKM_MD5_KEY_DERIVATION 0x00000390UL +#define CKM_MD2_KEY_DERIVATION 0x00000391UL +#define CKM_SHA1_KEY_DERIVATION 0x00000392UL +#define CKM_SHA256_KEY_DERIVATION 0x00000393UL +#define CKM_SHA384_KEY_DERIVATION 0x00000394UL +#define CKM_SHA512_KEY_DERIVATION 0x00000395UL +#define CKM_SHA224_KEY_DERIVATION 0x00000396UL +#define CKM_PBE_MD2_DES_CBC 0x000003A0UL +#define CKM_PBE_MD5_DES_CBC 0x000003A1UL +#define CKM_PBE_MD5_CAST_CBC 0x000003A2UL +#define CKM_PBE_MD5_CAST3_CBC 0x000003A3UL +#define CKM_PBE_MD5_CAST128_CBC 0x000003A4UL +#define CKM_PBE_SHA1_CAST128_CBC 0x000003A5UL +#define CKM_PBE_SHA1_RC4_128 0x000003A6UL +#define CKM_PBE_SHA1_RC4_40 0x000003A7UL +#define CKM_PBE_SHA1_DES3_EDE_CBC 0x000003A8UL +#define CKM_PBE_SHA1_DES2_EDE_CBC 0x000003A9UL +#define CKM_PBE_SHA1_RC2_128_CBC 0x000003AAUL +#define CKM_PBE_SHA1_RC2_40_CBC 0x000003ABUL +#define CKM_PKCS5_PBKD2 0x000003B0UL +#define CKM_PBA_SHA1_WITH_SHA1_HMAC 0x000003C0UL +#define CKM_WTLS_PRE_MASTER_KEY_GEN 0x000003D0UL +#define CKM_WTLS_MASTER_KEY_DERIVE 0x000003D1UL +#define 
CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC 0x000003D2UL +#define CKM_WTLS_PRF 0x000003D3UL +#define CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE 0x000003D4UL +#define CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE 0x000003D5UL +#define CKM_TLS10_MAC_SERVER 0x000003D6UL +#define CKM_TLS10_MAC_CLIENT 0x000003D7UL +#define CKM_TLS12_MAC 0x000003D8UL +#define CKM_TLS12_KDF 0x000003D9UL +#define CKM_TLS12_MASTER_KEY_DERIVE 0x000003E0UL +#define CKM_TLS12_KEY_AND_MAC_DERIVE 0x000003E1UL +#define CKM_TLS12_MASTER_KEY_DERIVE_DH 0x000003E2UL +#define CKM_TLS12_KEY_SAFE_DERIVE 0x000003E3UL +#define CKM_TLS_MAC 0x000003E4UL +#define CKM_TLS_KDF 0x000003E5UL +#define CKM_KEY_WRAP_LYNKS 0x00000400UL +#define CKM_KEY_WRAP_SET_OAEP 0x00000401UL +#define CKM_CMS_SIG 0x00000500UL +#define CKM_KIP_DERIVE 0x00000510UL +#define CKM_KIP_WRAP 0x00000511UL +#define CKM_KIP_MAC 0x00000512UL +#define CKM_CAMELLIA_KEY_GEN 0x00000550UL +#define CKM_CAMELLIA_ECB 0x00000551UL +#define CKM_CAMELLIA_CBC 0x00000552UL +#define CKM_CAMELLIA_MAC 0x00000553UL +#define CKM_CAMELLIA_MAC_GENERAL 0x00000554UL +#define CKM_CAMELLIA_CBC_PAD 0x00000555UL +#define CKM_CAMELLIA_ECB_ENCRYPT_DATA 0x00000556UL +#define CKM_CAMELLIA_CBC_ENCRYPT_DATA 0x00000557UL +#define CKM_CAMELLIA_CTR 0x00000558UL +#define CKM_ARIA_KEY_GEN 0x00000560UL +#define CKM_ARIA_ECB 0x00000561UL +#define CKM_ARIA_CBC 0x00000562UL +#define CKM_ARIA_MAC 0x00000563UL +#define CKM_ARIA_MAC_GENERAL 0x00000564UL +#define CKM_ARIA_CBC_PAD 0x00000565UL +#define CKM_ARIA_ECB_ENCRYPT_DATA 0x00000566UL +#define CKM_ARIA_CBC_ENCRYPT_DATA 0x00000567UL +#define CKM_SEED_KEY_GEN 0x00000650UL +#define CKM_SEED_ECB 0x00000651UL +#define CKM_SEED_CBC 0x00000652UL +#define CKM_SEED_MAC 0x00000653UL +#define CKM_SEED_MAC_GENERAL 0x00000654UL +#define CKM_SEED_CBC_PAD 0x00000655UL +#define CKM_SEED_ECB_ENCRYPT_DATA 0x00000656UL +#define CKM_SEED_CBC_ENCRYPT_DATA 0x00000657UL +#define CKM_SKIPJACK_KEY_GEN 0x00001000UL +#define CKM_SKIPJACK_ECB64 0x00001001UL +#define CKM_SKIPJACK_CBC64 
0x00001002UL +#define CKM_SKIPJACK_OFB64 0x00001003UL +#define CKM_SKIPJACK_CFB64 0x00001004UL +#define CKM_SKIPJACK_CFB32 0x00001005UL +#define CKM_SKIPJACK_CFB16 0x00001006UL +#define CKM_SKIPJACK_CFB8 0x00001007UL +#define CKM_SKIPJACK_WRAP 0x00001008UL +#define CKM_SKIPJACK_PRIVATE_WRAP 0x00001009UL +#define CKM_SKIPJACK_RELAYX 0x0000100AUL +#define CKM_KEA_KEY_PAIR_GEN 0x00001010UL +#define CKM_KEA_KEY_DERIVE 0x00001011UL +#define CKM_KEA_DERIVE 0x00001012UL +#define CKM_FORTEZZA_TIMESTAMP 0x00001020UL +#define CKM_BATON_KEY_GEN 0x00001030UL +#define CKM_BATON_ECB128 0x00001031UL +#define CKM_BATON_ECB96 0x00001032UL +#define CKM_BATON_CBC128 0x00001033UL +#define CKM_BATON_COUNTER 0x00001034UL +#define CKM_BATON_SHUFFLE 0x00001035UL +#define CKM_BATON_WRAP 0x00001036UL +#define CKM_EC_KEY_PAIR_GEN 0x00001040UL +#define CKM_ECDSA 0x00001041UL +#define CKM_ECDSA_SHA1 0x00001042UL +#define CKM_ECDSA_SHA224 0x00001043UL +#define CKM_ECDSA_SHA256 0x00001044UL +#define CKM_ECDSA_SHA384 0x00001045UL +#define CKM_ECDSA_SHA512 0x00001046UL +#define CKM_ECDH1_DERIVE 0x00001050UL +#define CKM_ECDH1_COFACTOR_DERIVE 0x00001051UL +#define CKM_ECMQV_DERIVE 0x00001052UL +#define CKM_ECDH_AES_KEY_WRAP 0x00001053UL +#define CKM_RSA_AES_KEY_WRAP 0x00001054UL +#define CKM_JUNIPER_KEY_GEN 0x00001060UL +#define CKM_JUNIPER_ECB128 0x00001061UL +#define CKM_JUNIPER_CBC128 0x00001062UL +#define CKM_JUNIPER_COUNTER 0x00001063UL +#define CKM_JUNIPER_SHUFFLE 0x00001064UL +#define CKM_JUNIPER_WRAP 0x00001065UL +#define CKM_FASTHASH 0x00001070UL +#define CKM_AES_KEY_GEN 0x00001080UL +#define CKM_AES_ECB 0x00001081UL +#define CKM_AES_CBC 0x00001082UL +#define CKM_AES_MAC 0x00001083UL +#define CKM_AES_MAC_GENERAL 0x00001084UL +#define CKM_AES_CBC_PAD 0x00001085UL +#define CKM_AES_CTR 0x00001086UL +#define CKM_AES_GCM 0x00001087UL +#define CKM_AES_CCM 0x00001088UL +#define CKM_AES_CTS 0x00001089UL +#define CKM_AES_CMAC 0x0000108AUL +#define CKM_AES_CMAC_GENERAL 0x0000108BUL +#define 
CKM_AES_XCBC_MAC 0x0000108CUL +#define CKM_AES_XCBC_MAC_96 0x0000108DUL +#define CKM_AES_GMAC 0x0000108EUL +#define CKM_BLOWFISH_KEY_GEN 0x00001090UL +#define CKM_BLOWFISH_CBC 0x00001091UL +#define CKM_TWOFISH_KEY_GEN 0x00001092UL +#define CKM_TWOFISH_CBC 0x00001093UL +#define CKM_BLOWFISH_CBC_PAD 0x00001094UL +#define CKM_TWOFISH_CBC_PAD 0x00001095UL +#define CKM_DES_ECB_ENCRYPT_DATA 0x00001100UL +#define CKM_DES_CBC_ENCRYPT_DATA 0x00001101UL +#define CKM_DES3_ECB_ENCRYPT_DATA 0x00001102UL +#define CKM_DES3_CBC_ENCRYPT_DATA 0x00001103UL +#define CKM_AES_ECB_ENCRYPT_DATA 0x00001104UL +#define CKM_AES_CBC_ENCRYPT_DATA 0x00001105UL +#define CKM_GOSTR3410_KEY_PAIR_GEN 0x00001200UL +#define CKM_GOSTR3410 0x00001201UL +#define CKM_GOSTR3410_WITH_GOSTR3411 0x00001202UL +#define CKM_GOSTR3410_KEY_WRAP 0x00001203UL +#define CKM_GOSTR3410_DERIVE 0x00001204UL +#define CKM_GOSTR3411 0x00001210UL +#define CKM_GOSTR3411_HMAC 0x00001211UL +#define CKM_GOST28147_KEY_GEN 0x00001220UL +#define CKM_GOST28147_ECB 0x00001221UL +#define CKM_GOST28147 0x00001222UL +#define CKM_GOST28147_MAC 0x00001223UL +#define CKM_GOST28147_KEY_WRAP 0x00001224UL +#define CKM_DSA_PARAMETER_GEN 0x00002000UL +#define CKM_DH_PKCS_PARAMETER_GEN 0x00002001UL +#define CKM_X9_42_DH_PARAMETER_GEN 0x00002002UL +#define CKM_DSA_PROBABILISTIC_PARAMETER_GEN 0x00002003UL +#define CKM_DSA_PROBABLISTIC_PARAMETER_GEN 0x00002003UL +#define CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN 0x00002004UL +#define CKM_AES_OFB 0x00002104UL +#define CKM_AES_CFB64 0x00002105UL +#define CKM_AES_CFB8 0x00002106UL +#define CKM_AES_CFB128 0x00002107UL +#define CKM_AES_CFB1 0x00002108UL +#define CKM_AES_KEY_WRAP 0x00002109UL +#define CKM_AES_KEY_WRAP_PAD 0x0000210AUL +#define CKM_RSA_PKCS_TPM_1_1 0x00004001UL +#define CKM_RSA_PKCS_OAEP_TPM_1_1 0x00004002UL +#define CKM_VENDOR_DEFINED 0x80000000UL +/* Deprecated */ +#ifdef PKCS11_DEPRECATED +#define CKM_CAST5_KEY_GEN 0x00000320UL +#define CKM_CAST5_CBC 0x00000322UL +#define CKM_CAST5_MAC 
0x00000323UL +#define CKM_CAST5_MAC_GENERAL 0x00000324UL +#define CKM_CAST5_CBC_PAD 0x00000325UL +#define CKM_PBE_MD5_CAST5_CBC 0x000003A4UL +#define CKM_PBE_SHA1_CAST5_CBC 0x000003A5UL +#define CKM_ECDSA_KEY_PAIR_GEN 0x00001040UL +#endif +/* CKN */ +#define CKN_SURRENDER 0UL +#define CKN_OTP_CHANGED 1UL -/* ============================================================== - * Define structed vector of entry points. A CK_FUNCTION_LIST - * contains a CK_VERSION indicating a library's Cryptoki version - * and then a whole slew of function pointers to the routines in - * the library. This type was declared, but not defined, in - * pkcs11t.h. - * ============================================================== - */ +/* CKO */ +#define CKO_DATA 0x00000000UL +#define CKO_CERTIFICATE 0x00000001UL +#define CKO_PUBLIC_KEY 0x00000002UL +#define CKO_PRIVATE_KEY 0x00000003UL +#define CKO_SECRET_KEY 0x00000004UL +#define CKO_HW_FEATURE 0x00000005UL +#define CKO_DOMAIN_PARAMETERS 0x00000006UL +#define CKO_MECHANISM 0x00000007UL +#define CKO_OTP_KEY 0x00000008UL +#define CKO_VENDOR_DEFINED 0x80000000UL -#define CK_PKCS11_FUNCTION_INFO(name) \ - __PASTE(CK_,name) name; +/* CKP (PBKD2) */ +#define CKP_PKCS5_PBKD2_HMAC_SHA1 0x00000001UL +#define CKP_PKCS5_PBKD2_HMAC_GOSTR3411 0x00000002UL +#define CKP_PKCS5_PBKD2_HMAC_SHA224 0x00000003UL +#define CKP_PKCS5_PBKD2_HMAC_SHA256 0x00000004UL +#define CKP_PKCS5_PBKD2_HMAC_SHA384 0x00000005UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512 0x00000006UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512_224 0x00000007UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512_256 0x00000008UL -struct CK_FUNCTION_LIST { +/* CKR */ +#define CKR_OK 0x00000000UL +#define CKR_CANCEL 0x00000001UL +#define CKR_HOST_MEMORY 0x00000002UL +#define CKR_SLOT_ID_INVALID 0x00000003UL +#define CKR_GENERAL_ERROR 0x00000005UL +#define CKR_FUNCTION_FAILED 0x00000006UL +#define CKR_ARGUMENTS_BAD 0x00000007UL +#define CKR_NO_EVENT 0x00000008UL +#define CKR_NEED_TO_CREATE_THREADS 0x00000009UL +#define 
CKR_CANT_LOCK 0x0000000AUL +#define CKR_ATTRIBUTE_READ_ONLY 0x00000010UL +#define CKR_ATTRIBUTE_SENSITIVE 0x00000011UL +#define CKR_ATTRIBUTE_TYPE_INVALID 0x00000012UL +#define CKR_ATTRIBUTE_VALUE_INVALID 0x00000013UL +#define CKR_ACTION_PROHIBITED 0x0000001BUL +#define CKR_DATA_INVALID 0x00000020UL +#define CKR_DATA_LEN_RANGE 0x00000021UL +#define CKR_DEVICE_ERROR 0x00000030UL +#define CKR_DEVICE_MEMORY 0x00000031UL +#define CKR_DEVICE_REMOVED 0x00000032UL +#define CKR_ENCRYPTED_DATA_INVALID 0x00000040UL +#define CKR_ENCRYPTED_DATA_LEN_RANGE 0x00000041UL +#define CKR_FUNCTION_CANCELED 0x00000050UL +#define CKR_FUNCTION_NOT_PARALLEL 0x00000051UL +#define CKR_FUNCTION_NOT_SUPPORTED 0x00000054UL +#define CKR_KEY_HANDLE_INVALID 0x00000060UL +#define CKR_KEY_SIZE_RANGE 0x00000062UL +#define CKR_KEY_TYPE_INCONSISTENT 0x00000063UL +#define CKR_KEY_NOT_NEEDED 0x00000064UL +#define CKR_KEY_CHANGED 0x00000065UL +#define CKR_KEY_NEEDED 0x00000066UL +#define CKR_KEY_INDIGESTIBLE 0x00000067UL +#define CKR_KEY_FUNCTION_NOT_PERMITTED 0x00000068UL +#define CKR_KEY_NOT_WRAPPABLE 0x00000069UL +#define CKR_KEY_UNEXTRACTABLE 0x0000006AUL +#define CKR_MECHANISM_INVALID 0x00000070UL +#define CKR_MECHANISM_PARAM_INVALID 0x00000071UL +#define CKR_OBJECT_HANDLE_INVALID 0x00000082UL +#define CKR_OPERATION_ACTIVE 0x00000090UL +#define CKR_OPERATION_NOT_INITIALIZED 0x00000091UL +#define CKR_PIN_INCORRECT 0x000000A0UL +#define CKR_PIN_INVALID 0x000000A1UL +#define CKR_PIN_LEN_RANGE 0x000000A2UL +#define CKR_PIN_EXPIRED 0x000000A3UL +#define CKR_PIN_LOCKED 0x000000A4UL +#define CKR_SESSION_CLOSED 0x000000B0UL +#define CKR_SESSION_COUNT 0x000000B1UL +#define CKR_SESSION_HANDLE_INVALID 0x000000B3UL +#define CKR_SESSION_PARALLEL_NOT_SUPPORTED 0x000000B4UL +#define CKR_SESSION_READ_ONLY 0x000000B5UL +#define CKR_SESSION_EXISTS 0x000000B6UL +#define CKR_SESSION_READ_ONLY_EXISTS 0x000000B7UL +#define CKR_SESSION_READ_WRITE_SO_EXISTS 0x000000B8UL +#define CKR_SIGNATURE_INVALID 0x000000C0UL +#define 
CKR_SIGNATURE_LEN_RANGE 0x000000C1UL +#define CKR_TEMPLATE_INCOMPLETE 0x000000D0UL +#define CKR_TEMPLATE_INCONSISTENT 0x000000D1UL +#define CKR_TOKEN_NOT_PRESENT 0x000000E0UL +#define CKR_TOKEN_NOT_RECOGNIZED 0x000000E1UL +#define CKR_TOKEN_WRITE_PROTECTED 0x000000E2UL +#define CKR_UNWRAPPING_KEY_HANDLE_INVALID 0x000000F0UL +#define CKR_UNWRAPPING_KEY_SIZE_RANGE 0x000000F1UL +#define CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x000000F2UL +#define CKR_USER_ALREADY_LOGGED_IN 0x00000100UL +#define CKR_USER_NOT_LOGGED_IN 0x00000101UL +#define CKR_USER_PIN_NOT_INITIALIZED 0x00000102UL +#define CKR_USER_TYPE_INVALID 0x00000103UL +#define CKR_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000104UL +#define CKR_USER_TOO_MANY_TYPES 0x00000105UL +#define CKR_WRAPPED_KEY_INVALID 0x00000110UL +#define CKR_WRAPPED_KEY_LEN_RANGE 0x00000112UL +#define CKR_WRAPPING_KEY_HANDLE_INVALID 0x00000113UL +#define CKR_WRAPPING_KEY_SIZE_RANGE 0x00000114UL +#define CKR_WRAPPING_KEY_TYPE_INCONSISTENT 0x00000115UL +#define CKR_RANDOM_SEED_NOT_SUPPORTED 0x00000120UL +#define CKR_RANDOM_NO_RNG 0x00000121UL +#define CKR_DOMAIN_PARAMS_INVALID 0x00000130UL +#define CKR_CURVE_NOT_SUPPORTED 0x00000140UL +#define CKR_BUFFER_TOO_SMALL 0x00000150UL +#define CKR_SAVED_STATE_INVALID 0x00000160UL +#define CKR_INFORMATION_SENSITIVE 0x00000170UL +#define CKR_STATE_UNSAVEABLE 0x00000180UL +#define CKR_CRYPTOKI_NOT_INITIALIZED 0x00000190UL +#define CKR_CRYPTOKI_ALREADY_INITIALIZED 0x00000191UL +#define CKR_MUTEX_BAD 0x000001A0UL +#define CKR_MUTEX_NOT_LOCKED 0x000001A1UL +#define CKR_NEW_PIN_MODE 0x000001B0UL +#define CKR_NEXT_OTP 0x000001B1UL +#define CKR_EXCEEDED_MAX_ITERATIONS 0x000001B5UL +#define CKR_FIPS_SELF_TEST_FAILED 0x000001B6UL +#define CKR_LIBRARY_LOAD_FAILED 0x000001B7UL +#define CKR_PIN_TOO_WEAK 0x000001B8UL +#define CKR_PUBLIC_KEY_INVALID 0x000001B9UL +#define CKR_FUNCTION_REJECTED 0x00000200UL +#define CKR_VENDOR_DEFINED 0x80000000UL - CK_VERSION version; /* Cryptoki version */ -/* Pile all the function 
pointers into the CK_FUNCTION_LIST. */ -/* pkcs11f.h has all the information about the Cryptoki - * function prototypes. - */ -#include "pkcs11f.h" +/* CKS */ +#define CKS_RO_PUBLIC_SESSION 0UL +#define CKS_RO_USER_FUNCTIONS 1UL +#define CKS_RW_PUBLIC_SESSION 2UL +#define CKS_RW_USER_FUNCTIONS 3UL +#define CKS_RW_SO_FUNCTIONS 4UL + +/* CKU */ +#define CKU_SO 0UL +#define CKU_USER 1UL +#define CKU_CONTEXT_SPECIFIC 2UL + +/* CKZ (data) */ +#define CKZ_DATA_SPECIFIED 0x00000001UL + +/* CKZ (salt) */ +#define CKZ_SALT_SPECIFIED 0x00000001UL + +/* Sundry structures type definition in alphabetical order */ +#define STRUCTDEF(__name__) \ +struct __name__; \ +typedef struct __name__ __name__; \ +typedef struct __name__ * __name__ ## _PTR; \ +typedef struct __name__ ** __name__ ## _PTR_PTR + +STRUCTDEF(CK_ATTRIBUTE); +STRUCTDEF(CK_C_INITIALIZE_ARGS); +STRUCTDEF(CK_DATE); +STRUCTDEF(CK_FUNCTION_LIST); +STRUCTDEF(CK_FUNCTION_LIST_3_0); +STRUCTDEF(CK_INFO); +STRUCTDEF(CK_MECHANISM); +STRUCTDEF(CK_MECHANISM_INFO); +STRUCTDEF(CK_SESSION_INFO); +STRUCTDEF(CK_SLOT_INFO); +STRUCTDEF(CK_TOKEN_INFO); +STRUCTDEF(CK_VERSION); + +/* Function type definitions */ +typedef CK_RV (* CK_NOTIFY)(CK_SESSION_HANDLE, CK_NOTIFICATION, void *); +typedef CK_RV (* CK_CREATEMUTEX)(void **); +typedef CK_RV (* CK_DESTROYMUTEX)(void *); +typedef CK_RV (* CK_LOCKMUTEX)(void *); +typedef CK_RV (* CK_UNLOCKMUTEX)(void *); +/* General Structure definitions */ +struct CK_ATTRIBUTE { + CK_ATTRIBUTE_TYPE type; + void * pValue; + CK_ULONG ulValueLen; }; -#undef CK_PKCS11_FUNCTION_INFO +struct CK_C_INITIALIZE_ARGS { + CK_CREATEMUTEX CreateMutex; + CK_DESTROYMUTEX DestroyMutex; + CK_LOCKMUTEX LockMutex; + CK_UNLOCKMUTEX UnlockMutex; + CK_FLAGS flags; + void * pReserved; +}; +struct CK_DATE{ + CK_CHAR year[4]; + CK_CHAR month[2]; + CK_CHAR day[2]; +}; -#undef __PASTE +struct CK_VERSION { + CK_BYTE major; + CK_BYTE minor; +}; -#ifdef __cplusplus -} -#endif +struct CK_INFO { + struct CK_VERSION cryptokiVersion; + 
CK_UTF8CHAR manufacturerID[32]; + CK_FLAGS flags; + CK_UTF8CHAR libraryDescription[32]; + struct CK_VERSION libraryVersion; +}; + +struct CK_MECHANISM { + CK_MECHANISM_TYPE mechanism; + void * pParameter; + CK_ULONG ulParameterLen; +}; + +struct CK_MECHANISM_INFO { + CK_ULONG ulMinKeySize; + CK_ULONG ulMaxKeySize; + CK_FLAGS flags; +}; + +struct CK_SESSION_INFO { + CK_SLOT_ID slotID; + CK_STATE state; + CK_FLAGS flags; + CK_ULONG ulDeviceError; +}; + +struct CK_SLOT_INFO { + CK_UTF8CHAR slotDescription[64]; + CK_UTF8CHAR manufacturerID[32]; + CK_FLAGS flags; + CK_VERSION hardwareVersion; + CK_VERSION firmwareVersion; +}; + +struct CK_TOKEN_INFO { + CK_UTF8CHAR label[32]; + CK_UTF8CHAR manufacturerID[32]; + CK_UTF8CHAR model[16]; + CK_CHAR serialNumber[16]; + CK_FLAGS flags; + CK_ULONG ulMaxSessionCount; + CK_ULONG ulSessionCount; + CK_ULONG ulMaxRwSessionCount; + CK_ULONG ulRwSessionCount; + CK_ULONG ulMaxPinLen; + CK_ULONG ulMinPinLen; + CK_ULONG ulTotalPublicMemory; + CK_ULONG ulFreePublicMemory; + CK_ULONG ulTotalPrivateMemory; + CK_ULONG ulFreePrivateMemory; + CK_VERSION hardwareVersion; + CK_VERSION firmwareVersion; + CK_CHAR utcTime[16]; +}; + +/* Param Structure definitions in alphabetical order */ +STRUCTDEF(CK_AES_CBC_ENCRYPT_DATA_PARAMS); +STRUCTDEF(CK_AES_CCM_PARAMS); +STRUCTDEF(CK_AES_CTR_PARAMS); +STRUCTDEF(CK_AES_GCM_PARAMS); +STRUCTDEF(CK_ARIA_CBC_ENCRYPT_DATA_PARAMS); +STRUCTDEF(CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS); +STRUCTDEF(CK_CAMELLIA_CTR_PARAMS); +STRUCTDEF(CK_CCM_PARAMS); +STRUCTDEF(CK_CMS_SIG_PARAMS); +STRUCTDEF(CK_DES_CBC_ENCRYPT_DATA_PARAMS); +STRUCTDEF(CK_DSA_PARAMETER_GEN_PARAM); +STRUCTDEF(CK_ECDH_AES_KEY_WRAP_PARAMS); +STRUCTDEF(CK_ECDH1_DERIVE_PARAMS); +STRUCTDEF(CK_ECDH2_DERIVE_PARAMS); +STRUCTDEF(CK_ECMQV_DERIVE_PARAMS); +STRUCTDEF(CK_GCM_PARAMS); +STRUCTDEF(CK_GOSTR3410_DERIVE_PARAMS); +STRUCTDEF(CK_GOSTR3410_KEY_WRAP_PARAMS); +STRUCTDEF(CK_KEA_DERIVE_PARAMS); +STRUCTDEF(CK_KEY_DERIVATION_STRING_DATA); 
+STRUCTDEF(CK_KEY_WRAP_SET_OAEP_PARAMS); +STRUCTDEF(CK_KIP_PARAMS); +STRUCTDEF(CK_OTP_PARAM); +STRUCTDEF(CK_OTP_PARAMS); +STRUCTDEF(CK_OTP_SIGNATURE_INFO); +STRUCTDEF(CK_PBE_PARAMS); +STRUCTDEF(CK_PKCS5_PBKD2_PARAMS); +STRUCTDEF(CK_PKCS5_PBKD2_PARAMS2); +STRUCTDEF(CK_RC2_CBC_PARAMS); +STRUCTDEF(CK_RC2_MAC_GENERAL_PARAMS); +STRUCTDEF(CK_RC5_CBC_PARAMS); +STRUCTDEF(CK_RC5_MAC_GENERAL_PARAMS); +STRUCTDEF(CK_RC5_PARAMS); +STRUCTDEF(CK_RSA_AES_KEY_WRAP_PARAMS); +STRUCTDEF(CK_RSA_PKCS_OAEP_PARAMS); +STRUCTDEF(CK_RSA_PKCS_PSS_PARAMS); +STRUCTDEF(CK_SEED_CBC_ENCRYPT_DATA_PARAMS); +STRUCTDEF(CK_SKIPJACK_PRIVATE_WRAP_PARAMS); +STRUCTDEF(CK_SKIPJACK_RELAYX_PARAMS); +STRUCTDEF(CK_X2RATCHET_INITIALIZE_PARAMS); +STRUCTDEF(CK_X2RATCHET_RESPOND_PARAMS); +STRUCTDEF(CK_X9_42_DH1_DERIVE_PARAMS); +STRUCTDEF(CK_X9_42_DH2_DERIVE_PARAMS); +STRUCTDEF(CK_X9_42_MQV_DERIVE_PARAMS); +STRUCTDEF(specifiedParams); + +struct CK_AES_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE * pData; + CK_ULONG length; +}; + +struct CK_AES_CCM_PARAMS { + CK_ULONG ulDataLen; + CK_BYTE * pNonce; + CK_ULONG ulNonceLen; + CK_BYTE * pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulMACLen; +}; + +struct CK_AES_CTR_PARAMS { + CK_ULONG ulCounterBits; + CK_BYTE cb[16]; +}; + +struct CK_AES_GCM_PARAMS { + CK_BYTE * pIv; + CK_ULONG ulIvLen; + CK_ULONG ulIvBits; + CK_BYTE * pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulTagBits; +}; + +struct CK_ARIA_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE * pData; + CK_ULONG length; +}; + +struct CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE * pData; + CK_ULONG length; +}; + +struct CK_CAMELLIA_CTR_PARAMS { + CK_ULONG ulCounterBits; + CK_BYTE cb[16]; +}; + +struct CK_CCM_PARAMS { + CK_ULONG ulDataLen; + CK_BYTE * pNonce; + CK_ULONG ulNonceLen; + CK_BYTE * pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulMACLen; +}; + +struct CK_CMS_SIG_PARAMS { + CK_OBJECT_HANDLE certificateHandle; + CK_MECHANISM * pSigningMechanism; + CK_MECHANISM * pDigestMechanism; + CK_UTF8CHAR * 
pContentType; + CK_BYTE * pRequestedAttributes; + CK_ULONG ulRequestedAttributesLen; + CK_BYTE * pRequiredAttributes; + CK_ULONG ulRequiredAttributesLen; +}; -#endif /* _PKCS11_H_ */ +struct CK_DES_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[8]; + CK_BYTE * pData; + CK_ULONG length; +}; + +struct CK_DSA_PARAMETER_GEN_PARAM { + CK_MECHANISM_TYPE hash; + CK_BYTE * pSeed; + CK_ULONG ulSeedLen; + CK_ULONG ulIndex; +}; +struct CK_ECDH_AES_KEY_WRAP_PARAMS { + CK_ULONG ulAESKeyBits; + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE * pSharedData; +}; + +struct CK_ECDH1_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE * pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE * pPublicData; +}; + +struct CK_ECDH2_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE * pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE * pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE * pPublicData2; +}; + +struct CK_ECMQV_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE * pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE * pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE * pPublicData2; + CK_OBJECT_HANDLE publicKey; +}; + +struct CK_GCM_PARAMS { + CK_BYTE * pIv; + CK_ULONG ulIvLen; + CK_ULONG ulIvBits; + CK_BYTE * pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulTagBits; +}; + +struct CK_GOSTR3410_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_BYTE * pPublicData; + CK_ULONG ulPublicDataLen; + CK_BYTE * pUKM; + CK_ULONG ulUKMLen; +}; + +struct CK_GOSTR3410_KEY_WRAP_PARAMS { + CK_BYTE * pWrapOID; + CK_ULONG ulWrapOIDLen; + CK_BYTE * pUKM; + CK_ULONG ulUKMLen; + CK_OBJECT_HANDLE hKey; +}; + +struct CK_KEA_DERIVE_PARAMS { + CK_BBOOL isSender; + CK_ULONG ulRandomLen; + CK_BYTE * RandomA; + CK_BYTE * RandomB; + CK_ULONG ulPublicDataLen; + CK_BYTE * PublicData; +}; + +struct CK_KEY_DERIVATION_STRING_DATA { 
+ CK_BYTE * pData; + CK_ULONG ulLen; +}; + +struct CK_KEY_WRAP_SET_OAEP_PARAMS { + CK_BYTE bBC; + CK_BYTE * pX; + CK_ULONG ulXLen; +}; + +struct CK_KIP_PARAMS { + CK_MECHANISM * pMechanism; + CK_OBJECT_HANDLE hKey; + CK_BYTE * pSeed; + CK_ULONG ulSeedLen; +}; + +struct CK_OTP_PARAM { + CK_OTP_PARAM_TYPE type; + void * pValue; + CK_ULONG ulValueLen; +}; + +struct CK_OTP_PARAMS { + CK_OTP_PARAM * pParams; + CK_ULONG ulCount; +}; + +struct CK_OTP_SIGNATURE_INFO { + CK_OTP_PARAM * pParams; + CK_ULONG ulCount; +}; + +struct CK_PBE_PARAMS { + CK_BYTE * pInitVector; + CK_UTF8CHAR * pPassword; + CK_ULONG ulPasswordLen; + CK_BYTE * pSalt; + CK_ULONG ulSaltLen; + CK_ULONG ulIteration; +}; + +struct CK_PKCS5_PBKD2_PARAMS { + CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; + void * pSaltSourceData; + CK_ULONG ulSaltSourceDataLen; + CK_ULONG iterations; + CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; + void * pPrfData; + CK_ULONG ulPrfDataLen; + CK_UTF8CHAR * pPassword; + CK_ULONG * ulPasswordLen; +}; + +struct CK_PKCS5_PBKD2_PARAMS2 { + CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; + void * pSaltSourceData; + CK_ULONG ulSaltSourceDataLen; + CK_ULONG iterations; + CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; + void * pPrfData; + CK_ULONG ulPrfDataLen; + CK_UTF8CHAR * pPassword; + CK_ULONG ulPasswordLen; +}; + +struct CK_RC2_CBC_PARAMS { + CK_ULONG ulEffectiveBits; + CK_BYTE iv[8]; +}; + +struct CK_RC2_MAC_GENERAL_PARAMS { + CK_ULONG ulEffectiveBits; + CK_ULONG ulMacLength; +}; + +struct CK_RC5_CBC_PARAMS { + CK_ULONG ulWordsize; + CK_ULONG ulRounds; + CK_BYTE * pIv; + CK_ULONG ulIvLen; +}; + +struct CK_RC5_MAC_GENERAL_PARAMS { + CK_ULONG ulWordsize; + CK_ULONG ulRounds; + CK_ULONG ulMacLength; +}; + +struct CK_RC5_PARAMS { + CK_ULONG ulWordsize; + CK_ULONG ulRounds; +}; + +struct CK_RSA_AES_KEY_WRAP_PARAMS { + CK_ULONG ulAESKeyBits; + CK_RSA_PKCS_OAEP_PARAMS * pOAEPParams; +}; + +struct CK_RSA_PKCS_OAEP_PARAMS { + CK_MECHANISM_TYPE hashAlg; + CK_RSA_PKCS_MGF_TYPE mgf; + 
CK_RSA_PKCS_OAEP_SOURCE_TYPE source; + void * pSourceData; + CK_ULONG ulSourceDataLen; +}; + +struct CK_RSA_PKCS_PSS_PARAMS { + CK_MECHANISM_TYPE hashAlg; + CK_RSA_PKCS_MGF_TYPE mgf; + CK_ULONG sLen; +}; + +struct CK_SEED_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE * pData; + CK_ULONG length; +}; + +struct CK_SKIPJACK_PRIVATE_WRAP_PARAMS { + CK_ULONG ulPasswordLen; + CK_BYTE * pPassword; + CK_ULONG ulPublicDataLen; + CK_BYTE * pPublicData; + CK_ULONG ulPAndGLen; + CK_ULONG ulQLen; + CK_ULONG ulRandomLen; + CK_BYTE * pRandomA; + CK_BYTE * pPrimeP; + CK_BYTE * pBaseG; + CK_BYTE * pSubprimeQ; +}; + +struct CK_SKIPJACK_RELAYX_PARAMS { + CK_ULONG ulOldWrappedXLen; + CK_BYTE * pOldWrappedX; + CK_ULONG ulOldPasswordLen; + CK_BYTE * pOldPassword; + CK_ULONG ulOldPublicDataLen; + CK_BYTE * pOldPublicData; + CK_ULONG ulOldRandomLen; + CK_BYTE * pOldRandomA; + CK_ULONG ulNewPasswordLen; + CK_BYTE * pNewPassword; + CK_ULONG ulNewPublicDataLen; + CK_BYTE * pNewPublicData; + CK_ULONG ulNewRandomLen; + CK_BYTE * pNewRandomA; +}; + +struct CK_X9_42_DH1_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE * pOtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE * pPublicData; +}; + +struct CK_X9_42_DH2_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE * pOtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE * pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE * pPublicData2; +}; + +struct CK_X9_42_MQV_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE * OtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE * PublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE * PublicData2; + CK_OBJECT_HANDLE publicKey; +}; + +/* TLS related structure definitions */ +STRUCTDEF(CK_SSL3_KEY_MAT_OUT); +STRUCTDEF(CK_SSL3_KEY_MAT_PARAMS); +STRUCTDEF(CK_SSL3_MASTER_KEY_DERIVE_PARAMS); 
+STRUCTDEF(CK_SSL3_RANDOM_DATA); +STRUCTDEF(CK_TLS_KDF_PARAMS); +STRUCTDEF(CK_TLS_MAC_PARAMS); +STRUCTDEF(CK_TLS_PRF_PARAMS); +STRUCTDEF(CK_TLS12_KEY_MAT_PARAMS); +STRUCTDEF(CK_TLS12_MASTER_KEY_DERIVE_PARAMS); +STRUCTDEF(CK_WTLS_KEY_MAT_OUT); +STRUCTDEF(CK_WTLS_KEY_MAT_PARAMS); +STRUCTDEF(CK_WTLS_MASTER_KEY_DERIVE_PARAMS); +STRUCTDEF(CK_WTLS_PRF_PARAMS); +STRUCTDEF(CK_WTLS_RANDOM_DATA); + +struct CK_SSL3_KEY_MAT_OUT { + CK_OBJECT_HANDLE hClientMacSecret; + CK_OBJECT_HANDLE hServerMacSecret; + CK_OBJECT_HANDLE hClientKey; + CK_OBJECT_HANDLE hServerKey; + CK_BYTE * pIVClient; + CK_BYTE * pIVServer; +}; + +struct CK_SSL3_RANDOM_DATA { + CK_BYTE * pClientRandom; + CK_ULONG ulClientRandomLen; + CK_BYTE * pServerRandom; + CK_ULONG ulServerRandomLen; +}; + +struct CK_SSL3_KEY_MAT_PARAMS { + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_BBOOL bIsExport; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_SSL3_KEY_MAT_OUT * pReturnedKeyMaterial; +}; + +struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS { + CK_SSL3_RANDOM_DATA RandomInfo; + CK_VERSION * pVersion; +}; + +struct CK_TLS_KDF_PARAMS { + CK_MECHANISM_TYPE prfMechanism; + CK_BYTE * pLabel; + CK_ULONG ulLabelLength; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_BYTE * pContextData; + CK_ULONG ulContextDataLength; +}; + +struct CK_TLS_MAC_PARAMS { + CK_MECHANISM_TYPE prfHashMechanism; + CK_ULONG ulMacLength; + CK_ULONG ulServerOrClient; +}; + +struct CK_TLS_PRF_PARAMS { + CK_BYTE * pSeed; + CK_ULONG ulSeedLen; + CK_BYTE * pLabel; + CK_ULONG ulLabelLen; + CK_BYTE * pOutput; + CK_ULONG * pulOutputLen; +}; + +struct CK_TLS12_KEY_MAT_PARAMS { + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_BBOOL bIsExport; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_SSL3_KEY_MAT_OUT * pReturnedKeyMaterial; + CK_MECHANISM_TYPE prfHashMechanism; +}; + +struct CK_TLS12_MASTER_KEY_DERIVE_PARAMS { + CK_SSL3_RANDOM_DATA RandomInfo; + CK_VERSION * pVersion; + CK_MECHANISM_TYPE prfHashMechanism; +}; + 
+struct CK_WTLS_KEY_MAT_OUT { + CK_OBJECT_HANDLE hMacSecret; + CK_OBJECT_HANDLE hKey; + CK_BYTE * pIV; +}; + +struct CK_WTLS_RANDOM_DATA { + CK_BYTE * pClientRandom; + CK_ULONG ulClientRandomLen; + CK_BYTE * pServerRandom; + CK_ULONG ulServerRandomLen; +}; + +struct CK_WTLS_KEY_MAT_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_ULONG ulSequenceNumber; + CK_BBOOL bIsExport; + CK_WTLS_RANDOM_DATA RandomInfo; + CK_WTLS_KEY_MAT_OUT * pReturnedKeyMaterial; +}; + +struct CK_WTLS_MASTER_KEY_DERIVE_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_WTLS_RANDOM_DATA RandomInfo; + CK_BYTE * pVersion; +}; + +struct CK_WTLS_PRF_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_BYTE * pSeed; + CK_ULONG ulSeedLen; + CK_BYTE * pLabel; + CK_ULONG ulLabelLen; + CK_BYTE * pOutput; + CK_ULONG * pulOutputLen; +}; + +/* PKCS11 Functions */ +extern CK_RV C_Initialize(void *); +extern CK_RV C_Finalize(void *); +extern CK_RV C_GetInfo(CK_INFO *); +extern CK_RV C_GetFunctionList(CK_FUNCTION_LIST **); +extern CK_RV C_GetSlotList(CK_BBOOL, CK_SLOT_ID *, CK_ULONG *); +extern CK_RV C_GetSlotInfo(CK_SLOT_ID, CK_SLOT_INFO *); +extern CK_RV C_GetTokenInfo(CK_SLOT_ID, CK_TOKEN_INFO *); +extern CK_RV C_GetMechanismList(CK_SLOT_ID, CK_MECHANISM_TYPE *, CK_ULONG *); +extern CK_RV C_GetMechanismInfo(CK_SLOT_ID, CK_MECHANISM_TYPE, + CK_MECHANISM_INFO *); +extern CK_RV C_InitToken(CK_SLOT_ID, CK_UTF8CHAR *, CK_ULONG, CK_UTF8CHAR *); +extern CK_RV C_InitPIN(CK_SESSION_HANDLE, CK_UTF8CHAR *, CK_ULONG); +extern CK_RV C_SetPIN(CK_SESSION_HANDLE, CK_UTF8CHAR *, CK_ULONG, CK_UTF8CHAR *, + CK_ULONG); +extern CK_RV C_OpenSession(CK_SLOT_ID, CK_FLAGS, void *, CK_NOTIFY, + CK_SESSION_HANDLE *); +extern CK_RV C_CloseSession(CK_SESSION_HANDLE); +extern CK_RV C_CloseAllSessions(CK_SLOT_ID); +extern CK_RV C_GetSessionInfo(CK_SESSION_HANDLE, CK_SESSION_INFO *); +extern CK_RV C_GetOperationState(CK_SESSION_HANDLE, CK_BYTE *, 
CK_ULONG *); +extern CK_RV C_SetOperationState(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_OBJECT_HANDLE, CK_OBJECT_HANDLE); +extern CK_RV C_Login(CK_SESSION_HANDLE, CK_USER_TYPE, CK_UTF8CHAR *, CK_ULONG); +extern CK_RV C_Logout(CK_SESSION_HANDLE); +extern CK_RV C_CreateObject(CK_SESSION_HANDLE, CK_ATTRIBUTE *, CK_ULONG, + CK_OBJECT_HANDLE *); +extern CK_RV C_CopyObject(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, + CK_ULONG, CK_OBJECT_HANDLE *); +extern CK_RV C_DestroyObject(CK_SESSION_HANDLE, CK_OBJECT_HANDLE); +extern CK_RV C_GetObjectSize(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ULONG *); +extern CK_RV C_GetAttributeValue(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, + CK_ATTRIBUTE *, CK_ULONG); +extern CK_RV C_SetAttributeValue(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, + CK_ATTRIBUTE *, CK_ULONG); +extern CK_RV C_FindObjectsInit(CK_SESSION_HANDLE, CK_ATTRIBUTE *, CK_ULONG); +extern CK_RV C_FindObjects(CK_SESSION_HANDLE, CK_OBJECT_HANDLE *, CK_ULONG, + CK_ULONG *); +extern CK_RV C_FindObjectsFinal(CK_SESSION_HANDLE); +extern CK_RV C_EncryptInit(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +extern CK_RV C_Encrypt(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG *); +extern CK_RV C_EncryptUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +extern CK_RV C_EncryptFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); +extern CK_RV C_DecryptInit(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +extern CK_RV C_Decrypt(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG *); +extern CK_RV C_DecryptUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +extern CK_RV C_DecryptFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); +extern CK_RV C_DigestInit(CK_SESSION_HANDLE, CK_MECHANISM *); +extern CK_RV C_Digest(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG *); +extern CK_RV C_DigestUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +extern CK_RV C_DigestKey(CK_SESSION_HANDLE, 
CK_OBJECT_HANDLE); +extern CK_RV C_DigestFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); +extern CK_RV C_SignInit(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); +extern CK_RV C_Sign(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG *); +extern CK_RV C_SignUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +extern CK_RV C_SignFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); +extern CK_RV C_SignRecoverInit(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +extern CK_RV C_SignRecover(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG *); +extern CK_RV C_VerifyInit(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +extern CK_RV C_Verify(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG); +extern CK_RV C_VerifyUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +extern CK_RV C_VerifyFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +extern CK_RV C_VerifyRecoverInit(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +extern CK_RV C_VerifyRecover(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +extern CK_RV C_DigestEncryptUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +extern CK_RV C_DecryptDigestUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +extern CK_RV C_SignEncryptUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +extern CK_RV C_DecryptVerifyUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +extern CK_RV C_GenerateKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_ATTRIBUTE *, + CK_ULONG, CK_OBJECT_HANDLE *); +extern CK_RV C_GenerateKeyPair(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_ATTRIBUTE *, CK_ULONG, CK_ATTRIBUTE *, + CK_ULONG, CK_OBJECT_HANDLE *, + CK_OBJECT_HANDLE *); +extern CK_RV C_WrapKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE, + CK_OBJECT_HANDLE, CK_BYTE *, CK_ULONG *); +extern CK_RV C_UnwrapKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE, + CK_BYTE *, CK_ULONG *, CK_ATTRIBUTE *, 
CK_ULONG, + CK_OBJECT_HANDLE *); +extern CK_RV C_DeriveKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE, + CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); +extern CK_RV C_SeedRandom(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +extern CK_RV C_GenerateRandom(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +extern CK_RV C_GetFunctionStatus(CK_SESSION_HANDLE); +extern CK_RV C_CancelFunction(CK_SESSION_HANDLE); +extern CK_RV C_WaitForSlotEvent(CK_FLAGS, CK_SLOT_ID *, void *); + +typedef CK_RV (* CK_C_Initialize)(void *); +typedef CK_RV (* CK_C_Finalize)(void *); +typedef CK_RV (* CK_C_GetInfo)(CK_INFO *); +typedef CK_RV (* CK_C_GetFunctionList)(CK_FUNCTION_LIST **); +typedef CK_RV (* CK_C_GetSlotList)(CK_BBOOL, CK_SLOT_ID *, CK_ULONG *); +typedef CK_RV (* CK_C_GetSlotInfo)(CK_SLOT_ID, CK_SLOT_INFO *); +typedef CK_RV (* CK_C_GetTokenInfo)(CK_SLOT_ID, CK_TOKEN_INFO *); +typedef CK_RV (* CK_C_GetMechanismList)(CK_SLOT_ID, CK_MECHANISM_TYPE *, + CK_ULONG *); +typedef CK_RV (* CK_C_GetMechanismInfo)(CK_SLOT_ID, CK_MECHANISM_TYPE, + CK_MECHANISM_INFO *); +typedef CK_RV (* CK_C_InitToken)(CK_SLOT_ID, CK_UTF8CHAR *, CK_ULONG, + CK_UTF8CHAR *); +typedef CK_RV (* CK_C_InitPIN)(CK_SESSION_HANDLE, CK_UTF8CHAR *, CK_ULONG); +typedef CK_RV (* CK_C_SetPIN)(CK_SESSION_HANDLE, CK_UTF8CHAR *, CK_ULONG, + CK_UTF8CHAR *, CK_ULONG); +typedef CK_RV (* CK_C_OpenSession)(CK_SLOT_ID, CK_FLAGS, void *, CK_NOTIFY, + CK_SESSION_HANDLE *); +typedef CK_RV (* CK_C_CloseSession)(CK_SESSION_HANDLE); +typedef CK_RV (* CK_C_CloseAllSessions)(CK_SLOT_ID); +typedef CK_RV (* CK_C_GetSessionInfo)(CK_SESSION_HANDLE, CK_SESSION_INFO *); +typedef CK_RV (* CK_C_GetOperationState)(CK_SESSION_HANDLE, CK_BYTE *, + CK_ULONG *); +typedef CK_RV (* CK_C_SetOperationState)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_OBJECT_HANDLE, CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_Login)(CK_SESSION_HANDLE, CK_USER_TYPE, CK_UTF8CHAR *, + CK_ULONG); +typedef CK_RV (* CK_C_Logout)(CK_SESSION_HANDLE); +typedef CK_RV (* 
CK_C_CreateObject)(CK_SESSION_HANDLE, CK_ATTRIBUTE *, CK_ULONG, + CK_OBJECT_HANDLE *); +typedef CK_RV (* CK_C_CopyObject)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, + CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); +typedef CK_RV (* CK_C_DestroyObject)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_GetObjectSize)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, + CK_ULONG *); +typedef CK_RV (* CK_C_GetAttributeValue)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, + CK_ATTRIBUTE *, CK_ULONG); +typedef CK_RV (* CK_C_SetAttributeValue)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, + CK_ATTRIBUTE *, CK_ULONG); +typedef CK_RV (* CK_C_FindObjectsInit)(CK_SESSION_HANDLE, CK_ATTRIBUTE *, + CK_ULONG); +typedef CK_RV (* CK_C_FindObjects)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE *, + CK_ULONG, CK_ULONG *); +typedef CK_RV (* CK_C_FindObjectsFinal)(CK_SESSION_HANDLE); +typedef CK_RV (* CK_C_EncryptInit)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_Encrypt)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_EncryptUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_EncryptFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_DecryptInit)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_Decrypt)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_DecryptUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_DecryptFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_DigestInit)(CK_SESSION_HANDLE, CK_MECHANISM *); +typedef CK_RV (* CK_C_Digest)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG *); +typedef CK_RV (* CK_C_DigestUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +typedef CK_RV (* CK_C_DigestKey)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_DigestFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); +typedef CK_RV 
(* CK_C_SignInit)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_Sign)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG *); +typedef CK_RV (* CK_C_SignUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +typedef CK_RV (* CK_C_SignFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_SignRecoverInit)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_SignRecover)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_VerifyInit)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_Verify)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, + CK_ULONG); +typedef CK_RV (* CK_C_VerifyUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +typedef CK_RV (* CK_C_VerifyFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +typedef CK_RV (* CK_C_VerifyRecoverInit)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE); +typedef CK_RV (* CK_C_VerifyRecover)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_DigestEncryptUpdate)(CK_SESSION_HANDLE, CK_BYTE *, + CK_ULONG, CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_DecryptDigestUpdate)(CK_SESSION_HANDLE, CK_BYTE *, + CK_ULONG, CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_SignEncryptUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, + CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_DecryptVerifyUpdate)(CK_SESSION_HANDLE, CK_BYTE *, + CK_ULONG, CK_BYTE *, CK_ULONG *); +typedef CK_RV (* CK_C_GenerateKey)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_ATTRIBUTE *, CK_ULONG, + CK_OBJECT_HANDLE *); +typedef CK_RV (* CK_C_GenerateKeyPair)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_ATTRIBUTE *, CK_ULONG, CK_ATTRIBUTE *, + CK_ULONG, CK_OBJECT_HANDLE *, + CK_OBJECT_HANDLE *); +typedef CK_RV (* CK_C_WrapKey)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE, CK_OBJECT_HANDLE, CK_BYTE *, + CK_ULONG *); +typedef CK_RV (* CK_C_UnwrapKey)(CK_SESSION_HANDLE, CK_MECHANISM *, + 
CK_OBJECT_HANDLE, CK_BYTE *, CK_ULONG, + CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); +typedef CK_RV (* CK_C_DeriveKey)(CK_SESSION_HANDLE, CK_MECHANISM *, + CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG, + CK_OBJECT_HANDLE *); +typedef CK_RV (* CK_C_SeedRandom)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +typedef CK_RV (* CK_C_GenerateRandom)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); +typedef CK_RV (* CK_C_GetFunctionStatus)(CK_SESSION_HANDLE); +typedef CK_RV (* CK_C_CancelFunction)(CK_SESSION_HANDLE); +typedef CK_RV (* CK_C_WaitForSlotEvent)(CK_FLAGS, CK_SLOT_ID *, void *); + +struct CK_FUNCTION_LIST { + CK_VERSION version; + CK_C_Initialize C_Initialize; + CK_C_Finalize C_Finalize; + CK_C_GetInfo C_GetInfo; + CK_C_GetFunctionList C_GetFunctionList; + CK_C_GetSlotList C_GetSlotList; + CK_C_GetSlotInfo C_GetSlotInfo; + CK_C_GetTokenInfo C_GetTokenInfo; + CK_C_GetMechanismList C_GetMechanismList; + CK_C_GetMechanismInfo C_GetMechanismInfo; + CK_C_InitToken C_InitToken; + CK_C_InitPIN C_InitPIN; + CK_C_SetPIN C_SetPIN; + CK_C_OpenSession C_OpenSession; + CK_C_CloseSession C_CloseSession; + CK_C_CloseAllSessions C_CloseAllSessions; + CK_C_GetSessionInfo C_GetSessionInfo; + CK_C_GetOperationState C_GetOperationState; + CK_C_SetOperationState C_SetOperationState; + CK_C_Login C_Login; + CK_C_Logout C_Logout; + CK_C_CreateObject C_CreateObject; + CK_C_CopyObject C_CopyObject; + CK_C_DestroyObject C_DestroyObject; + CK_C_GetObjectSize C_GetObjectSize; + CK_C_GetAttributeValue C_GetAttributeValue; + CK_C_SetAttributeValue C_SetAttributeValue; + CK_C_FindObjectsInit C_FindObjectsInit; + CK_C_FindObjects C_FindObjects; + CK_C_FindObjectsFinal C_FindObjectsFinal; + CK_C_EncryptInit C_EncryptInit; + CK_C_Encrypt C_Encrypt; + CK_C_EncryptUpdate C_EncryptUpdate; + CK_C_EncryptFinal C_EncryptFinal; + CK_C_DecryptInit C_DecryptInit; + CK_C_Decrypt C_Decrypt; + CK_C_DecryptUpdate C_DecryptUpdate; + CK_C_DecryptFinal C_DecryptFinal; + CK_C_DigestInit C_DigestInit; + CK_C_Digest C_Digest; 
+ CK_C_DigestUpdate C_DigestUpdate; + CK_C_DigestKey C_DigestKey; + CK_C_DigestFinal C_DigestFinal; + CK_C_SignInit C_SignInit; + CK_C_Sign C_Sign; + CK_C_SignUpdate C_SignUpdate; + CK_C_SignFinal C_SignFinal; + CK_C_SignRecoverInit C_SignRecoverInit; + CK_C_SignRecover C_SignRecover; + CK_C_VerifyInit C_VerifyInit; + CK_C_Verify C_Verify; + CK_C_VerifyUpdate C_VerifyUpdate; + CK_C_VerifyFinal C_VerifyFinal; + CK_C_VerifyRecoverInit C_VerifyRecoverInit; + CK_C_VerifyRecover C_VerifyRecover; + CK_C_DigestEncryptUpdate C_DigestEncryptUpdate; + CK_C_DecryptDigestUpdate C_DecryptDigestUpdate; + CK_C_SignEncryptUpdate C_SignEncryptUpdate; + CK_C_DecryptVerifyUpdate C_DecryptVerifyUpdate; + CK_C_GenerateKey C_GenerateKey; + CK_C_GenerateKeyPair C_GenerateKeyPair; + CK_C_WrapKey C_WrapKey; + CK_C_UnwrapKey C_UnwrapKey; + CK_C_DeriveKey C_DeriveKey; + CK_C_SeedRandom C_SeedRandom; + CK_C_GenerateRandom C_GenerateRandom; + CK_C_GetFunctionStatus C_GetFunctionStatus; + CK_C_CancelFunction C_CancelFunction; + CK_C_WaitForSlotEvent C_WaitForSlotEvent; +}; + + +#endif diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11f.h b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11f.h deleted file mode 100644 index ed90affc5e3..00000000000 --- a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11f.h +++ /dev/null @@ -1,939 +0,0 @@ -/* Copyright (c) OASIS Open 2016. All Rights Reserved./ - * /Distributed under the terms of the OASIS IPR Policy, - * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY - * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. - */ - -/* Latest version of the specification: - * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html - */ - -/* This header file contains pretty much everything about all the - * Cryptoki function prototypes. 
Because this information is - * used for more than just declaring function prototypes, the - * order of the functions appearing herein is important, and - * should not be altered. - */ - -/* General-purpose */ - -/* C_Initialize initializes the Cryptoki library. */ -CK_PKCS11_FUNCTION_INFO(C_Initialize) -#ifdef CK_NEED_ARG_LIST -( - CK_VOID_PTR pInitArgs /* if this is not NULL_PTR, it gets - * cast to CK_C_INITIALIZE_ARGS_PTR - * and dereferenced - */ -); -#endif - - -/* C_Finalize indicates that an application is done with the - * Cryptoki library. - */ -CK_PKCS11_FUNCTION_INFO(C_Finalize) -#ifdef CK_NEED_ARG_LIST -( - CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */ -); -#endif - - -/* C_GetInfo returns general information about Cryptoki. */ -CK_PKCS11_FUNCTION_INFO(C_GetInfo) -#ifdef CK_NEED_ARG_LIST -( - CK_INFO_PTR pInfo /* location that receives information */ -); -#endif - - -/* C_GetFunctionList returns the function list. */ -CK_PKCS11_FUNCTION_INFO(C_GetFunctionList) -#ifdef CK_NEED_ARG_LIST -( - CK_FUNCTION_LIST_PTR_PTR ppFunctionList /* receives pointer to - * function list - */ -); -#endif - - - -/* Slot and token management */ - -/* C_GetSlotList obtains a list of slots in the system. */ -CK_PKCS11_FUNCTION_INFO(C_GetSlotList) -#ifdef CK_NEED_ARG_LIST -( - CK_BBOOL tokenPresent, /* only slots with tokens */ - CK_SLOT_ID_PTR pSlotList, /* receives array of slot IDs */ - CK_ULONG_PTR pulCount /* receives number of slots */ -); -#endif - - -/* C_GetSlotInfo obtains information about a particular slot in - * the system. - */ -CK_PKCS11_FUNCTION_INFO(C_GetSlotInfo) -#ifdef CK_NEED_ARG_LIST -( - CK_SLOT_ID slotID, /* the ID of the slot */ - CK_SLOT_INFO_PTR pInfo /* receives the slot information */ -); -#endif - - -/* C_GetTokenInfo obtains information about a particular token - * in the system. 
- */ -CK_PKCS11_FUNCTION_INFO(C_GetTokenInfo) -#ifdef CK_NEED_ARG_LIST -( - CK_SLOT_ID slotID, /* ID of the token's slot */ - CK_TOKEN_INFO_PTR pInfo /* receives the token information */ -); -#endif - - -/* C_GetMechanismList obtains a list of mechanism types - * supported by a token. - */ -CK_PKCS11_FUNCTION_INFO(C_GetMechanismList) -#ifdef CK_NEED_ARG_LIST -( - CK_SLOT_ID slotID, /* ID of token's slot */ - CK_MECHANISM_TYPE_PTR pMechanismList, /* gets mech. array */ - CK_ULONG_PTR pulCount /* gets # of mechs. */ -); -#endif - - -/* C_GetMechanismInfo obtains information about a particular - * mechanism possibly supported by a token. - */ -CK_PKCS11_FUNCTION_INFO(C_GetMechanismInfo) -#ifdef CK_NEED_ARG_LIST -( - CK_SLOT_ID slotID, /* ID of the token's slot */ - CK_MECHANISM_TYPE type, /* type of mechanism */ - CK_MECHANISM_INFO_PTR pInfo /* receives mechanism info */ -); -#endif - - -/* C_InitToken initializes a token. */ -CK_PKCS11_FUNCTION_INFO(C_InitToken) -#ifdef CK_NEED_ARG_LIST -( - CK_SLOT_ID slotID, /* ID of the token's slot */ - CK_UTF8CHAR_PTR pPin, /* the SO's initial PIN */ - CK_ULONG ulPinLen, /* length in bytes of the PIN */ - CK_UTF8CHAR_PTR pLabel /* 32-byte token label (blank padded) */ -); -#endif - - -/* C_InitPIN initializes the normal user's PIN. */ -CK_PKCS11_FUNCTION_INFO(C_InitPIN) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_UTF8CHAR_PTR pPin, /* the normal user's PIN */ - CK_ULONG ulPinLen /* length in bytes of the PIN */ -); -#endif - - -/* C_SetPIN modifies the PIN of the user who is logged in. 
*/ -CK_PKCS11_FUNCTION_INFO(C_SetPIN) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_UTF8CHAR_PTR pOldPin, /* the old PIN */ - CK_ULONG ulOldLen, /* length of the old PIN */ - CK_UTF8CHAR_PTR pNewPin, /* the new PIN */ - CK_ULONG ulNewLen /* length of the new PIN */ -); -#endif - - - -/* Session management */ - -/* C_OpenSession opens a session between an application and a - * token. - */ -CK_PKCS11_FUNCTION_INFO(C_OpenSession) -#ifdef CK_NEED_ARG_LIST -( - CK_SLOT_ID slotID, /* the slot's ID */ - CK_FLAGS flags, /* from CK_SESSION_INFO */ - CK_VOID_PTR pApplication, /* passed to callback */ - CK_NOTIFY Notify, /* callback function */ - CK_SESSION_HANDLE_PTR phSession /* gets session handle */ -); -#endif - - -/* C_CloseSession closes a session between an application and a - * token. - */ -CK_PKCS11_FUNCTION_INFO(C_CloseSession) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession /* the session's handle */ -); -#endif - - -/* C_CloseAllSessions closes all sessions with a token. */ -CK_PKCS11_FUNCTION_INFO(C_CloseAllSessions) -#ifdef CK_NEED_ARG_LIST -( - CK_SLOT_ID slotID /* the token's slot */ -); -#endif - - -/* C_GetSessionInfo obtains information about the session. */ -CK_PKCS11_FUNCTION_INFO(C_GetSessionInfo) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_SESSION_INFO_PTR pInfo /* receives session info */ -); -#endif - - -/* C_GetOperationState obtains the state of the cryptographic operation - * in a session. - */ -CK_PKCS11_FUNCTION_INFO(C_GetOperationState) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pOperationState, /* gets state */ - CK_ULONG_PTR pulOperationStateLen /* gets state length */ -); -#endif - - -/* C_SetOperationState restores the state of the cryptographic - * operation in a session. 
- */ -CK_PKCS11_FUNCTION_INFO(C_SetOperationState) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pOperationState, /* holds state */ - CK_ULONG ulOperationStateLen, /* holds state length */ - CK_OBJECT_HANDLE hEncryptionKey, /* en/decryption key */ - CK_OBJECT_HANDLE hAuthenticationKey /* sign/verify key */ -); -#endif - - -/* C_Login logs a user into a token. */ -CK_PKCS11_FUNCTION_INFO(C_Login) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_USER_TYPE userType, /* the user type */ - CK_UTF8CHAR_PTR pPin, /* the user's PIN */ - CK_ULONG ulPinLen /* the length of the PIN */ -); -#endif - - -/* C_Logout logs a user out from a token. */ -CK_PKCS11_FUNCTION_INFO(C_Logout) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession /* the session's handle */ -); -#endif - - - -/* Object management */ - -/* C_CreateObject creates a new object. */ -CK_PKCS11_FUNCTION_INFO(C_CreateObject) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_ATTRIBUTE_PTR pTemplate, /* the object's template */ - CK_ULONG ulCount, /* attributes in template */ - CK_OBJECT_HANDLE_PTR phObject /* gets new object's handle. */ -); -#endif - - -/* C_CopyObject copies an object, creating a new object for the - * copy. - */ -CK_PKCS11_FUNCTION_INFO(C_CopyObject) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_OBJECT_HANDLE hObject, /* the object's handle */ - CK_ATTRIBUTE_PTR pTemplate, /* template for new object */ - CK_ULONG ulCount, /* attributes in template */ - CK_OBJECT_HANDLE_PTR phNewObject /* receives handle of copy */ -); -#endif - - -/* C_DestroyObject destroys an object. */ -CK_PKCS11_FUNCTION_INFO(C_DestroyObject) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_OBJECT_HANDLE hObject /* the object's handle */ -); -#endif - - -/* C_GetObjectSize gets the size of an object in bytes. 
*/ -CK_PKCS11_FUNCTION_INFO(C_GetObjectSize) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_OBJECT_HANDLE hObject, /* the object's handle */ - CK_ULONG_PTR pulSize /* receives size of object */ -); -#endif - - -/* C_GetAttributeValue obtains the value of one or more object - * attributes. - */ -CK_PKCS11_FUNCTION_INFO(C_GetAttributeValue) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_OBJECT_HANDLE hObject, /* the object's handle */ - CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs; gets vals */ - CK_ULONG ulCount /* attributes in template */ -); -#endif - - -/* C_SetAttributeValue modifies the value of one or more object - * attributes. - */ -CK_PKCS11_FUNCTION_INFO(C_SetAttributeValue) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_OBJECT_HANDLE hObject, /* the object's handle */ - CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs and values */ - CK_ULONG ulCount /* attributes in template */ -); -#endif - - -/* C_FindObjectsInit initializes a search for token and session - * objects that match a template. - */ -CK_PKCS11_FUNCTION_INFO(C_FindObjectsInit) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_ATTRIBUTE_PTR pTemplate, /* attribute values to match */ - CK_ULONG ulCount /* attrs in search template */ -); -#endif - - -/* C_FindObjects continues a search for token and session - * objects that match a template, obtaining additional object - * handles. - */ -CK_PKCS11_FUNCTION_INFO(C_FindObjects) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_OBJECT_HANDLE_PTR phObject, /* gets obj. handles */ - CK_ULONG ulMaxObjectCount, /* max handles to get */ - CK_ULONG_PTR pulObjectCount /* actual # returned */ -); -#endif - - -/* C_FindObjectsFinal finishes a search for token and session - * objects. 
- */ -CK_PKCS11_FUNCTION_INFO(C_FindObjectsFinal) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession /* the session's handle */ -); -#endif - - - -/* Encryption and decryption */ - -/* C_EncryptInit initializes an encryption operation. */ -CK_PKCS11_FUNCTION_INFO(C_EncryptInit) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism, /* the encryption mechanism */ - CK_OBJECT_HANDLE hKey /* handle of encryption key */ -); -#endif - - -/* C_Encrypt encrypts single-part data. */ -CK_PKCS11_FUNCTION_INFO(C_Encrypt) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pData, /* the plaintext data */ - CK_ULONG ulDataLen, /* bytes of plaintext */ - CK_BYTE_PTR pEncryptedData, /* gets ciphertext */ - CK_ULONG_PTR pulEncryptedDataLen /* gets c-text size */ -); -#endif - - -/* C_EncryptUpdate continues a multiple-part encryption - * operation. - */ -CK_PKCS11_FUNCTION_INFO(C_EncryptUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pPart, /* the plaintext data */ - CK_ULONG ulPartLen, /* plaintext data len */ - CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ - CK_ULONG_PTR pulEncryptedPartLen /* gets c-text size */ -); -#endif - - -/* C_EncryptFinal finishes a multiple-part encryption - * operation. - */ -CK_PKCS11_FUNCTION_INFO(C_EncryptFinal) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session handle */ - CK_BYTE_PTR pLastEncryptedPart, /* last c-text */ - CK_ULONG_PTR pulLastEncryptedPartLen /* gets last size */ -); -#endif - - -/* C_DecryptInit initializes a decryption operation. */ -CK_PKCS11_FUNCTION_INFO(C_DecryptInit) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism, /* the decryption mechanism */ - CK_OBJECT_HANDLE hKey /* handle of decryption key */ -); -#endif - - -/* C_Decrypt decrypts encrypted data in a single part. 
*/ -CK_PKCS11_FUNCTION_INFO(C_Decrypt) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pEncryptedData, /* ciphertext */ - CK_ULONG ulEncryptedDataLen, /* ciphertext length */ - CK_BYTE_PTR pData, /* gets plaintext */ - CK_ULONG_PTR pulDataLen /* gets p-text size */ -); -#endif - - -/* C_DecryptUpdate continues a multiple-part decryption - * operation. - */ -CK_PKCS11_FUNCTION_INFO(C_DecryptUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pEncryptedPart, /* encrypted data */ - CK_ULONG ulEncryptedPartLen, /* input length */ - CK_BYTE_PTR pPart, /* gets plaintext */ - CK_ULONG_PTR pulPartLen /* p-text size */ -); -#endif - - -/* C_DecryptFinal finishes a multiple-part decryption - * operation. - */ -CK_PKCS11_FUNCTION_INFO(C_DecryptFinal) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pLastPart, /* gets plaintext */ - CK_ULONG_PTR pulLastPartLen /* p-text size */ -); -#endif - - - -/* Message digesting */ - -/* C_DigestInit initializes a message-digesting operation. */ -CK_PKCS11_FUNCTION_INFO(C_DigestInit) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism /* the digesting mechanism */ -); -#endif - - -/* C_Digest digests data in a single part. */ -CK_PKCS11_FUNCTION_INFO(C_Digest) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pData, /* data to be digested */ - CK_ULONG ulDataLen, /* bytes of data to digest */ - CK_BYTE_PTR pDigest, /* gets the message digest */ - CK_ULONG_PTR pulDigestLen /* gets digest length */ -); -#endif - - -/* C_DigestUpdate continues a multiple-part message-digesting - * operation. 
- */ -CK_PKCS11_FUNCTION_INFO(C_DigestUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pPart, /* data to be digested */ - CK_ULONG ulPartLen /* bytes of data to be digested */ -); -#endif - - -/* C_DigestKey continues a multi-part message-digesting - * operation, by digesting the value of a secret key as part of - * the data already digested. - */ -CK_PKCS11_FUNCTION_INFO(C_DigestKey) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_OBJECT_HANDLE hKey /* secret key to digest */ -); -#endif - - -/* C_DigestFinal finishes a multiple-part message-digesting - * operation. - */ -CK_PKCS11_FUNCTION_INFO(C_DigestFinal) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pDigest, /* gets the message digest */ - CK_ULONG_PTR pulDigestLen /* gets byte count of digest */ -); -#endif - - - -/* Signing and MACing */ - -/* C_SignInit initializes a signature (private key encryption) - * operation, where the signature is (will be) an appendix to - * the data, and plaintext cannot be recovered from the - * signature. - */ -CK_PKCS11_FUNCTION_INFO(C_SignInit) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism, /* the signature mechanism */ - CK_OBJECT_HANDLE hKey /* handle of signature key */ -); -#endif - - -/* C_Sign signs (encrypts with private key) data in a single - * part, where the signature is (will be) an appendix to the - * data, and plaintext cannot be recovered from the signature. 
- */ -CK_PKCS11_FUNCTION_INFO(C_Sign) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pData, /* the data to sign */ - CK_ULONG ulDataLen, /* count of bytes to sign */ - CK_BYTE_PTR pSignature, /* gets the signature */ - CK_ULONG_PTR pulSignatureLen /* gets signature length */ -); -#endif - - -/* C_SignUpdate continues a multiple-part signature operation, - * where the signature is (will be) an appendix to the data, - * and plaintext cannot be recovered from the signature. - */ -CK_PKCS11_FUNCTION_INFO(C_SignUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pPart, /* the data to sign */ - CK_ULONG ulPartLen /* count of bytes to sign */ -); -#endif - - -/* C_SignFinal finishes a multiple-part signature operation, - * returning the signature. - */ -CK_PKCS11_FUNCTION_INFO(C_SignFinal) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pSignature, /* gets the signature */ - CK_ULONG_PTR pulSignatureLen /* gets signature length */ -); -#endif - - -/* C_SignRecoverInit initializes a signature operation, where - * the data can be recovered from the signature. - */ -CK_PKCS11_FUNCTION_INFO(C_SignRecoverInit) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism, /* the signature mechanism */ - CK_OBJECT_HANDLE hKey /* handle of the signature key */ -); -#endif - - -/* C_SignRecover signs data in a single operation, where the - * data can be recovered from the signature. 
- */ -CK_PKCS11_FUNCTION_INFO(C_SignRecover) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pData, /* the data to sign */ - CK_ULONG ulDataLen, /* count of bytes to sign */ - CK_BYTE_PTR pSignature, /* gets the signature */ - CK_ULONG_PTR pulSignatureLen /* gets signature length */ -); -#endif - - - -/* Verifying signatures and MACs */ - -/* C_VerifyInit initializes a verification operation, where the - * signature is an appendix to the data, and plaintext cannot - * cannot be recovered from the signature (e.g. DSA). - */ -CK_PKCS11_FUNCTION_INFO(C_VerifyInit) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism, /* the verification mechanism */ - CK_OBJECT_HANDLE hKey /* verification key */ -); -#endif - - -/* C_Verify verifies a signature in a single-part operation, - * where the signature is an appendix to the data, and plaintext - * cannot be recovered from the signature. - */ -CK_PKCS11_FUNCTION_INFO(C_Verify) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pData, /* signed data */ - CK_ULONG ulDataLen, /* length of signed data */ - CK_BYTE_PTR pSignature, /* signature */ - CK_ULONG ulSignatureLen /* signature length*/ -); -#endif - - -/* C_VerifyUpdate continues a multiple-part verification - * operation, where the signature is an appendix to the data, - * and plaintext cannot be recovered from the signature. - */ -CK_PKCS11_FUNCTION_INFO(C_VerifyUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pPart, /* signed data */ - CK_ULONG ulPartLen /* length of signed data */ -); -#endif - - -/* C_VerifyFinal finishes a multiple-part verification - * operation, checking the signature. 
- */ -CK_PKCS11_FUNCTION_INFO(C_VerifyFinal) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pSignature, /* signature to verify */ - CK_ULONG ulSignatureLen /* signature length */ -); -#endif - - -/* C_VerifyRecoverInit initializes a signature verification - * operation, where the data is recovered from the signature. - */ -CK_PKCS11_FUNCTION_INFO(C_VerifyRecoverInit) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism, /* the verification mechanism */ - CK_OBJECT_HANDLE hKey /* verification key */ -); -#endif - - -/* C_VerifyRecover verifies a signature in a single-part - * operation, where the data is recovered from the signature. - */ -CK_PKCS11_FUNCTION_INFO(C_VerifyRecover) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pSignature, /* signature to verify */ - CK_ULONG ulSignatureLen, /* signature length */ - CK_BYTE_PTR pData, /* gets signed data */ - CK_ULONG_PTR pulDataLen /* gets signed data len */ -); -#endif - - - -/* Dual-function cryptographic operations */ - -/* C_DigestEncryptUpdate continues a multiple-part digesting - * and encryption operation. - */ -CK_PKCS11_FUNCTION_INFO(C_DigestEncryptUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pPart, /* the plaintext data */ - CK_ULONG ulPartLen, /* plaintext length */ - CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ - CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ -); -#endif - - -/* C_DecryptDigestUpdate continues a multiple-part decryption and - * digesting operation. 
- */ -CK_PKCS11_FUNCTION_INFO(C_DecryptDigestUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pEncryptedPart, /* ciphertext */ - CK_ULONG ulEncryptedPartLen, /* ciphertext length */ - CK_BYTE_PTR pPart, /* gets plaintext */ - CK_ULONG_PTR pulPartLen /* gets plaintext len */ -); -#endif - - -/* C_SignEncryptUpdate continues a multiple-part signing and - * encryption operation. - */ -CK_PKCS11_FUNCTION_INFO(C_SignEncryptUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pPart, /* the plaintext data */ - CK_ULONG ulPartLen, /* plaintext length */ - CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ - CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ -); -#endif - - -/* C_DecryptVerifyUpdate continues a multiple-part decryption and - * verify operation. - */ -CK_PKCS11_FUNCTION_INFO(C_DecryptVerifyUpdate) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_BYTE_PTR pEncryptedPart, /* ciphertext */ - CK_ULONG ulEncryptedPartLen, /* ciphertext length */ - CK_BYTE_PTR pPart, /* gets plaintext */ - CK_ULONG_PTR pulPartLen /* gets p-text length */ -); -#endif - - - -/* Key management */ - -/* C_GenerateKey generates a secret key, creating a new key - * object. - */ -CK_PKCS11_FUNCTION_INFO(C_GenerateKey) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism, /* key generation mech. */ - CK_ATTRIBUTE_PTR pTemplate, /* template for new key */ - CK_ULONG ulCount, /* # of attrs in template */ - CK_OBJECT_HANDLE_PTR phKey /* gets handle of new key */ -); -#endif - - -/* C_GenerateKeyPair generates a public-key/private-key pair, - * creating new key objects. - */ -CK_PKCS11_FUNCTION_INFO(C_GenerateKeyPair) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session handle */ - CK_MECHANISM_PTR pMechanism, /* key-gen mech. 
*/ - CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template for pub. key */ - CK_ULONG ulPublicKeyAttributeCount, /* # pub. attrs. */ - CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template for priv. key */ - CK_ULONG ulPrivateKeyAttributeCount, /* # priv. attrs. */ - CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub. key handle */ - CK_OBJECT_HANDLE_PTR phPrivateKey /* gets priv. key handle */ -); -#endif - - -/* C_WrapKey wraps (i.e., encrypts) a key. */ -CK_PKCS11_FUNCTION_INFO(C_WrapKey) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_MECHANISM_PTR pMechanism, /* the wrapping mechanism */ - CK_OBJECT_HANDLE hWrappingKey, /* wrapping key */ - CK_OBJECT_HANDLE hKey, /* key to be wrapped */ - CK_BYTE_PTR pWrappedKey, /* gets wrapped key */ - CK_ULONG_PTR pulWrappedKeyLen /* gets wrapped key size */ -); -#endif - - -/* C_UnwrapKey unwraps (decrypts) a wrapped key, creating a new - * key object. - */ -CK_PKCS11_FUNCTION_INFO(C_UnwrapKey) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_MECHANISM_PTR pMechanism, /* unwrapping mech. */ - CK_OBJECT_HANDLE hUnwrappingKey, /* unwrapping key */ - CK_BYTE_PTR pWrappedKey, /* the wrapped key */ - CK_ULONG ulWrappedKeyLen, /* wrapped key len */ - CK_ATTRIBUTE_PTR pTemplate, /* new key template */ - CK_ULONG ulAttributeCount, /* template length */ - CK_OBJECT_HANDLE_PTR phKey /* gets new handle */ -); -#endif - - -/* C_DeriveKey derives a key from a base key, creating a new key - * object. - */ -CK_PKCS11_FUNCTION_INFO(C_DeriveKey) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* session's handle */ - CK_MECHANISM_PTR pMechanism, /* key deriv. mech. 
*/ - CK_OBJECT_HANDLE hBaseKey, /* base key */ - CK_ATTRIBUTE_PTR pTemplate, /* new key template */ - CK_ULONG ulAttributeCount, /* template length */ - CK_OBJECT_HANDLE_PTR phKey /* gets new handle */ -); -#endif - - - -/* Random number generation */ - -/* C_SeedRandom mixes additional seed material into the token's - * random number generator. - */ -CK_PKCS11_FUNCTION_INFO(C_SeedRandom) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR pSeed, /* the seed material */ - CK_ULONG ulSeedLen /* length of seed material */ -); -#endif - - -/* C_GenerateRandom generates random data. */ -CK_PKCS11_FUNCTION_INFO(C_GenerateRandom) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_BYTE_PTR RandomData, /* receives the random data */ - CK_ULONG ulRandomLen /* # of bytes to generate */ -); -#endif - - - -/* Parallel function management */ - -/* C_GetFunctionStatus is a legacy function; it obtains an - * updated status of a function running in parallel with an - * application. - */ -CK_PKCS11_FUNCTION_INFO(C_GetFunctionStatus) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession /* the session's handle */ -); -#endif - - -/* C_CancelFunction is a legacy function; it cancels a function - * running in parallel. - */ -CK_PKCS11_FUNCTION_INFO(C_CancelFunction) -#ifdef CK_NEED_ARG_LIST -( - CK_SESSION_HANDLE hSession /* the session's handle */ -); -#endif - - -/* C_WaitForSlotEvent waits for a slot event (token insertion, - * removal, etc.) to occur. - */ -CK_PKCS11_FUNCTION_INFO(C_WaitForSlotEvent) -#ifdef CK_NEED_ARG_LIST -( - CK_FLAGS flags, /* blocking/nonblocking flag */ - CK_SLOT_ID_PTR pSlot, /* location that receives the slot ID */ - CK_VOID_PTR pRserved /* reserved. 
Should be NULL_PTR */ -); -#endif - diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11t.h b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11t.h deleted file mode 100644 index c13e67cf55f..00000000000 --- a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11t.h +++ /dev/null @@ -1,2003 +0,0 @@ -/* Copyright (c) OASIS Open 2016. All Rights Reserved./ - * /Distributed under the terms of the OASIS IPR Policy, - * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY - * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. - */ - -/* Latest version of the specification: - * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html - */ - -/* See top of pkcs11.h for information about the macros that - * must be defined and the structure-packing conventions that - * must be set before including this file. - */ - -#ifndef _PKCS11T_H_ -#define _PKCS11T_H_ 1 - -#define CRYPTOKI_VERSION_MAJOR 2 -#define CRYPTOKI_VERSION_MINOR 40 -#define CRYPTOKI_VERSION_AMENDMENT 0 - -#define CK_TRUE 1 -#define CK_FALSE 0 - -#ifndef CK_DISABLE_TRUE_FALSE -#ifndef FALSE -#define FALSE CK_FALSE -#endif -#ifndef TRUE -#define TRUE CK_TRUE -#endif -#endif - -/* an unsigned 8-bit value */ -typedef unsigned char CK_BYTE; - -/* an unsigned 8-bit character */ -typedef CK_BYTE CK_CHAR; - -/* an 8-bit UTF-8 character */ -typedef CK_BYTE CK_UTF8CHAR; - -/* a BYTE-sized Boolean flag */ -typedef CK_BYTE CK_BBOOL; - -/* an unsigned value, at least 32 bits long */ -typedef unsigned long int CK_ULONG; - -/* a signed value, the same size as a CK_ULONG */ -typedef long int CK_LONG; - -/* at least 32 bits; each bit is a Boolean flag */ -typedef CK_ULONG CK_FLAGS; - - -/* some special values for certain CK_ULONG variables */ -#define CK_UNAVAILABLE_INFORMATION (~0UL) -#define CK_EFFECTIVELY_INFINITE 0UL - - -typedef CK_BYTE CK_PTR CK_BYTE_PTR; 
-typedef CK_CHAR CK_PTR CK_CHAR_PTR; -typedef CK_UTF8CHAR CK_PTR CK_UTF8CHAR_PTR; -typedef CK_ULONG CK_PTR CK_ULONG_PTR; -typedef void CK_PTR CK_VOID_PTR; - -/* Pointer to a CK_VOID_PTR-- i.e., pointer to pointer to void */ -typedef CK_VOID_PTR CK_PTR CK_VOID_PTR_PTR; - - -/* The following value is always invalid if used as a session - * handle or object handle - */ -#define CK_INVALID_HANDLE 0UL - - -typedef struct CK_VERSION { - CK_BYTE major; /* integer portion of version number */ - CK_BYTE minor; /* 1/100ths portion of version number */ -} CK_VERSION; - -typedef CK_VERSION CK_PTR CK_VERSION_PTR; - - -typedef struct CK_INFO { - CK_VERSION cryptokiVersion; /* Cryptoki interface ver */ - CK_UTF8CHAR manufacturerID[32]; /* blank padded */ - CK_FLAGS flags; /* must be zero */ - CK_UTF8CHAR libraryDescription[32]; /* blank padded */ - CK_VERSION libraryVersion; /* version of library */ -} CK_INFO; - -typedef CK_INFO CK_PTR CK_INFO_PTR; - - -/* CK_NOTIFICATION enumerates the types of notifications that - * Cryptoki provides to an application - */ -typedef CK_ULONG CK_NOTIFICATION; -#define CKN_SURRENDER 0UL -#define CKN_OTP_CHANGED 1UL - -typedef CK_ULONG CK_SLOT_ID; - -typedef CK_SLOT_ID CK_PTR CK_SLOT_ID_PTR; - - -/* CK_SLOT_INFO provides information about a slot */ -typedef struct CK_SLOT_INFO { - CK_UTF8CHAR slotDescription[64]; /* blank padded */ - CK_UTF8CHAR manufacturerID[32]; /* blank padded */ - CK_FLAGS flags; - - CK_VERSION hardwareVersion; /* version of hardware */ - CK_VERSION firmwareVersion; /* version of firmware */ -} CK_SLOT_INFO; - -/* flags: bit flags that provide capabilities of the slot - * Bit Flag Mask Meaning - */ -#define CKF_TOKEN_PRESENT 0x00000001UL /* a token is there */ -#define CKF_REMOVABLE_DEVICE 0x00000002UL /* removable devices*/ -#define CKF_HW_SLOT 0x00000004UL /* hardware slot */ - -typedef CK_SLOT_INFO CK_PTR CK_SLOT_INFO_PTR; - - -/* CK_TOKEN_INFO provides information about a token */ -typedef struct CK_TOKEN_INFO { - 
CK_UTF8CHAR label[32]; /* blank padded */ - CK_UTF8CHAR manufacturerID[32]; /* blank padded */ - CK_UTF8CHAR model[16]; /* blank padded */ - CK_CHAR serialNumber[16]; /* blank padded */ - CK_FLAGS flags; /* see below */ - - CK_ULONG ulMaxSessionCount; /* max open sessions */ - CK_ULONG ulSessionCount; /* sess. now open */ - CK_ULONG ulMaxRwSessionCount; /* max R/W sessions */ - CK_ULONG ulRwSessionCount; /* R/W sess. now open */ - CK_ULONG ulMaxPinLen; /* in bytes */ - CK_ULONG ulMinPinLen; /* in bytes */ - CK_ULONG ulTotalPublicMemory; /* in bytes */ - CK_ULONG ulFreePublicMemory; /* in bytes */ - CK_ULONG ulTotalPrivateMemory; /* in bytes */ - CK_ULONG ulFreePrivateMemory; /* in bytes */ - CK_VERSION hardwareVersion; /* version of hardware */ - CK_VERSION firmwareVersion; /* version of firmware */ - CK_CHAR utcTime[16]; /* time */ -} CK_TOKEN_INFO; - -/* The flags parameter is defined as follows: - * Bit Flag Mask Meaning - */ -#define CKF_RNG 0x00000001UL /* has random # generator */ -#define CKF_WRITE_PROTECTED 0x00000002UL /* token is write-protected */ -#define CKF_LOGIN_REQUIRED 0x00000004UL /* user must login */ -#define CKF_USER_PIN_INITIALIZED 0x00000008UL /* normal user's PIN is set */ - -/* CKF_RESTORE_KEY_NOT_NEEDED. If it is set, - * that means that *every* time the state of cryptographic - * operations of a session is successfully saved, all keys - * needed to continue those operations are stored in the state - */ -#define CKF_RESTORE_KEY_NOT_NEEDED 0x00000020UL - -/* CKF_CLOCK_ON_TOKEN. If it is set, that means - * that the token has some sort of clock. The time on that - * clock is returned in the token info structure - */ -#define CKF_CLOCK_ON_TOKEN 0x00000040UL - -/* CKF_PROTECTED_AUTHENTICATION_PATH. If it is - * set, that means that there is some way for the user to login - * without sending a PIN through the Cryptoki library itself - */ -#define CKF_PROTECTED_AUTHENTICATION_PATH 0x00000100UL - -/* CKF_DUAL_CRYPTO_OPERATIONS. 
If it is true, - * that means that a single session with the token can perform - * dual simultaneous cryptographic operations (digest and - * encrypt; decrypt and digest; sign and encrypt; and decrypt - * and sign) - */ -#define CKF_DUAL_CRYPTO_OPERATIONS 0x00000200UL - -/* CKF_TOKEN_INITIALIZED. If it is true, the - * token has been initialized using C_InitializeToken or an - * equivalent mechanism outside the scope of PKCS #11. - * Calling C_InitializeToken when this flag is set will cause - * the token to be reinitialized. - */ -#define CKF_TOKEN_INITIALIZED 0x00000400UL - -/* CKF_SECONDARY_AUTHENTICATION. If it is - * true, the token supports secondary authentication for - * private key objects. - */ -#define CKF_SECONDARY_AUTHENTICATION 0x00000800UL - -/* CKF_USER_PIN_COUNT_LOW. If it is true, an - * incorrect user login PIN has been entered at least once - * since the last successful authentication. - */ -#define CKF_USER_PIN_COUNT_LOW 0x00010000UL - -/* CKF_USER_PIN_FINAL_TRY. If it is true, - * supplying an incorrect user PIN will it to become locked. - */ -#define CKF_USER_PIN_FINAL_TRY 0x00020000UL - -/* CKF_USER_PIN_LOCKED. If it is true, the - * user PIN has been locked. User login to the token is not - * possible. - */ -#define CKF_USER_PIN_LOCKED 0x00040000UL - -/* CKF_USER_PIN_TO_BE_CHANGED. If it is true, - * the user PIN value is the default value set by token - * initialization or manufacturing, or the PIN has been - * expired by the card. - */ -#define CKF_USER_PIN_TO_BE_CHANGED 0x00080000UL - -/* CKF_SO_PIN_COUNT_LOW. If it is true, an - * incorrect SO login PIN has been entered at least once since - * the last successful authentication. - */ -#define CKF_SO_PIN_COUNT_LOW 0x00100000UL - -/* CKF_SO_PIN_FINAL_TRY. If it is true, - * supplying an incorrect SO PIN will it to become locked. - */ -#define CKF_SO_PIN_FINAL_TRY 0x00200000UL - -/* CKF_SO_PIN_LOCKED. If it is true, the SO - * PIN has been locked. SO login to the token is not possible. 
- */ -#define CKF_SO_PIN_LOCKED 0x00400000UL - -/* CKF_SO_PIN_TO_BE_CHANGED. If it is true, - * the SO PIN value is the default value set by token - * initialization or manufacturing, or the PIN has been - * expired by the card. - */ -#define CKF_SO_PIN_TO_BE_CHANGED 0x00800000UL - -#define CKF_ERROR_STATE 0x01000000UL - -typedef CK_TOKEN_INFO CK_PTR CK_TOKEN_INFO_PTR; - - -/* CK_SESSION_HANDLE is a Cryptoki-assigned value that - * identifies a session - */ -typedef CK_ULONG CK_SESSION_HANDLE; - -typedef CK_SESSION_HANDLE CK_PTR CK_SESSION_HANDLE_PTR; - - -/* CK_USER_TYPE enumerates the types of Cryptoki users */ -typedef CK_ULONG CK_USER_TYPE; -/* Security Officer */ -#define CKU_SO 0UL -/* Normal user */ -#define CKU_USER 1UL -/* Context specific */ -#define CKU_CONTEXT_SPECIFIC 2UL - -/* CK_STATE enumerates the session states */ -typedef CK_ULONG CK_STATE; -#define CKS_RO_PUBLIC_SESSION 0UL -#define CKS_RO_USER_FUNCTIONS 1UL -#define CKS_RW_PUBLIC_SESSION 2UL -#define CKS_RW_USER_FUNCTIONS 3UL -#define CKS_RW_SO_FUNCTIONS 4UL - -/* CK_SESSION_INFO provides information about a session */ -typedef struct CK_SESSION_INFO { - CK_SLOT_ID slotID; - CK_STATE state; - CK_FLAGS flags; /* see below */ - CK_ULONG ulDeviceError; /* device-dependent error code */ -} CK_SESSION_INFO; - -/* The flags are defined in the following table: - * Bit Flag Mask Meaning - */ -#define CKF_RW_SESSION 0x00000002UL /* session is r/w */ -#define CKF_SERIAL_SESSION 0x00000004UL /* no parallel */ - -typedef CK_SESSION_INFO CK_PTR CK_SESSION_INFO_PTR; - - -/* CK_OBJECT_HANDLE is a token-specific identifier for an - * object - */ -typedef CK_ULONG CK_OBJECT_HANDLE; - -typedef CK_OBJECT_HANDLE CK_PTR CK_OBJECT_HANDLE_PTR; - - -/* CK_OBJECT_CLASS is a value that identifies the classes (or - * types) of objects that Cryptoki recognizes. 
It is defined - * as follows: - */ -typedef CK_ULONG CK_OBJECT_CLASS; - -/* The following classes of objects are defined: */ -#define CKO_DATA 0x00000000UL -#define CKO_CERTIFICATE 0x00000001UL -#define CKO_PUBLIC_KEY 0x00000002UL -#define CKO_PRIVATE_KEY 0x00000003UL -#define CKO_SECRET_KEY 0x00000004UL -#define CKO_HW_FEATURE 0x00000005UL -#define CKO_DOMAIN_PARAMETERS 0x00000006UL -#define CKO_MECHANISM 0x00000007UL -#define CKO_OTP_KEY 0x00000008UL - -#define CKO_VENDOR_DEFINED 0x80000000UL - -typedef CK_OBJECT_CLASS CK_PTR CK_OBJECT_CLASS_PTR; - -/* CK_HW_FEATURE_TYPE is a value that identifies the hardware feature type - * of an object with CK_OBJECT_CLASS equal to CKO_HW_FEATURE. - */ -typedef CK_ULONG CK_HW_FEATURE_TYPE; - -/* The following hardware feature types are defined */ -#define CKH_MONOTONIC_COUNTER 0x00000001UL -#define CKH_CLOCK 0x00000002UL -#define CKH_USER_INTERFACE 0x00000003UL -#define CKH_VENDOR_DEFINED 0x80000000UL - -/* CK_KEY_TYPE is a value that identifies a key type */ -typedef CK_ULONG CK_KEY_TYPE; - -/* the following key types are defined: */ -#define CKK_RSA 0x00000000UL -#define CKK_DSA 0x00000001UL -#define CKK_DH 0x00000002UL -#define CKK_ECDSA 0x00000003UL /* Deprecated */ -#define CKK_EC 0x00000003UL -#define CKK_X9_42_DH 0x00000004UL -#define CKK_KEA 0x00000005UL -#define CKK_GENERIC_SECRET 0x00000010UL -#define CKK_RC2 0x00000011UL -#define CKK_RC4 0x00000012UL -#define CKK_DES 0x00000013UL -#define CKK_DES2 0x00000014UL -#define CKK_DES3 0x00000015UL -#define CKK_CAST 0x00000016UL -#define CKK_CAST3 0x00000017UL -#define CKK_CAST5 0x00000018UL /* Deprecated */ -#define CKK_CAST128 0x00000018UL -#define CKK_RC5 0x00000019UL -#define CKK_IDEA 0x0000001AUL -#define CKK_SKIPJACK 0x0000001BUL -#define CKK_BATON 0x0000001CUL -#define CKK_JUNIPER 0x0000001DUL -#define CKK_CDMF 0x0000001EUL -#define CKK_AES 0x0000001FUL -#define CKK_BLOWFISH 0x00000020UL -#define CKK_TWOFISH 0x00000021UL -#define CKK_SECURID 0x00000022UL -#define 
CKK_HOTP 0x00000023UL -#define CKK_ACTI 0x00000024UL -#define CKK_CAMELLIA 0x00000025UL -#define CKK_ARIA 0x00000026UL - -#define CKK_MD5_HMAC 0x00000027UL -#define CKK_SHA_1_HMAC 0x00000028UL -#define CKK_RIPEMD128_HMAC 0x00000029UL -#define CKK_RIPEMD160_HMAC 0x0000002AUL -#define CKK_SHA256_HMAC 0x0000002BUL -#define CKK_SHA384_HMAC 0x0000002CUL -#define CKK_SHA512_HMAC 0x0000002DUL -#define CKK_SHA224_HMAC 0x0000002EUL - -#define CKK_SEED 0x0000002FUL -#define CKK_GOSTR3410 0x00000030UL -#define CKK_GOSTR3411 0x00000031UL -#define CKK_GOST28147 0x00000032UL - - - -#define CKK_VENDOR_DEFINED 0x80000000UL - - -/* CK_CERTIFICATE_TYPE is a value that identifies a certificate - * type - */ -typedef CK_ULONG CK_CERTIFICATE_TYPE; - -#define CK_CERTIFICATE_CATEGORY_UNSPECIFIED 0UL -#define CK_CERTIFICATE_CATEGORY_TOKEN_USER 1UL -#define CK_CERTIFICATE_CATEGORY_AUTHORITY 2UL -#define CK_CERTIFICATE_CATEGORY_OTHER_ENTITY 3UL - -#define CK_SECURITY_DOMAIN_UNSPECIFIED 0UL -#define CK_SECURITY_DOMAIN_MANUFACTURER 1UL -#define CK_SECURITY_DOMAIN_OPERATOR 2UL -#define CK_SECURITY_DOMAIN_THIRD_PARTY 3UL - - -/* The following certificate types are defined: */ -#define CKC_X_509 0x00000000UL -#define CKC_X_509_ATTR_CERT 0x00000001UL -#define CKC_WTLS 0x00000002UL -#define CKC_VENDOR_DEFINED 0x80000000UL - - -/* CK_ATTRIBUTE_TYPE is a value that identifies an attribute - * type - */ -typedef CK_ULONG CK_ATTRIBUTE_TYPE; - -/* The CKF_ARRAY_ATTRIBUTE flag identifies an attribute which - * consists of an array of values. 
- */ -#define CKF_ARRAY_ATTRIBUTE 0x40000000UL - -/* The following OTP-related defines relate to the CKA_OTP_FORMAT attribute */ -#define CK_OTP_FORMAT_DECIMAL 0UL -#define CK_OTP_FORMAT_HEXADECIMAL 1UL -#define CK_OTP_FORMAT_ALPHANUMERIC 2UL -#define CK_OTP_FORMAT_BINARY 3UL - -/* The following OTP-related defines relate to the CKA_OTP_..._REQUIREMENT - * attributes - */ -#define CK_OTP_PARAM_IGNORED 0UL -#define CK_OTP_PARAM_OPTIONAL 1UL -#define CK_OTP_PARAM_MANDATORY 2UL - -/* The following attribute types are defined: */ -#define CKA_CLASS 0x00000000UL -#define CKA_TOKEN 0x00000001UL -#define CKA_PRIVATE 0x00000002UL -#define CKA_LABEL 0x00000003UL -#define CKA_APPLICATION 0x00000010UL -#define CKA_VALUE 0x00000011UL -#define CKA_OBJECT_ID 0x00000012UL -#define CKA_CERTIFICATE_TYPE 0x00000080UL -#define CKA_ISSUER 0x00000081UL -#define CKA_SERIAL_NUMBER 0x00000082UL -#define CKA_AC_ISSUER 0x00000083UL -#define CKA_OWNER 0x00000084UL -#define CKA_ATTR_TYPES 0x00000085UL -#define CKA_TRUSTED 0x00000086UL -#define CKA_CERTIFICATE_CATEGORY 0x00000087UL -#define CKA_JAVA_MIDP_SECURITY_DOMAIN 0x00000088UL -#define CKA_URL 0x00000089UL -#define CKA_HASH_OF_SUBJECT_PUBLIC_KEY 0x0000008AUL -#define CKA_HASH_OF_ISSUER_PUBLIC_KEY 0x0000008BUL -#define CKA_NAME_HASH_ALGORITHM 0x0000008CUL -#define CKA_CHECK_VALUE 0x00000090UL - -#define CKA_KEY_TYPE 0x00000100UL -#define CKA_SUBJECT 0x00000101UL -#define CKA_ID 0x00000102UL -#define CKA_SENSITIVE 0x00000103UL -#define CKA_ENCRYPT 0x00000104UL -#define CKA_DECRYPT 0x00000105UL -#define CKA_WRAP 0x00000106UL -#define CKA_UNWRAP 0x00000107UL -#define CKA_SIGN 0x00000108UL -#define CKA_SIGN_RECOVER 0x00000109UL -#define CKA_VERIFY 0x0000010AUL -#define CKA_VERIFY_RECOVER 0x0000010BUL -#define CKA_DERIVE 0x0000010CUL -#define CKA_START_DATE 0x00000110UL -#define CKA_END_DATE 0x00000111UL -#define CKA_MODULUS 0x00000120UL -#define CKA_MODULUS_BITS 0x00000121UL -#define CKA_PUBLIC_EXPONENT 0x00000122UL -#define 
CKA_PRIVATE_EXPONENT 0x00000123UL -#define CKA_PRIME_1 0x00000124UL -#define CKA_PRIME_2 0x00000125UL -#define CKA_EXPONENT_1 0x00000126UL -#define CKA_EXPONENT_2 0x00000127UL -#define CKA_COEFFICIENT 0x00000128UL -#define CKA_PUBLIC_KEY_INFO 0x00000129UL -#define CKA_PRIME 0x00000130UL -#define CKA_SUBPRIME 0x00000131UL -#define CKA_BASE 0x00000132UL - -#define CKA_PRIME_BITS 0x00000133UL -#define CKA_SUBPRIME_BITS 0x00000134UL -#define CKA_SUB_PRIME_BITS CKA_SUBPRIME_BITS - -#define CKA_VALUE_BITS 0x00000160UL -#define CKA_VALUE_LEN 0x00000161UL -#define CKA_EXTRACTABLE 0x00000162UL -#define CKA_LOCAL 0x00000163UL -#define CKA_NEVER_EXTRACTABLE 0x00000164UL -#define CKA_ALWAYS_SENSITIVE 0x00000165UL -#define CKA_KEY_GEN_MECHANISM 0x00000166UL - -#define CKA_MODIFIABLE 0x00000170UL -#define CKA_COPYABLE 0x00000171UL - -#define CKA_DESTROYABLE 0x00000172UL - -#define CKA_ECDSA_PARAMS 0x00000180UL /* Deprecated */ -#define CKA_EC_PARAMS 0x00000180UL - -#define CKA_EC_POINT 0x00000181UL - -#define CKA_SECONDARY_AUTH 0x00000200UL /* Deprecated */ -#define CKA_AUTH_PIN_FLAGS 0x00000201UL /* Deprecated */ - -#define CKA_ALWAYS_AUTHENTICATE 0x00000202UL - -#define CKA_WRAP_WITH_TRUSTED 0x00000210UL -#define CKA_WRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000211UL) -#define CKA_UNWRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000212UL) -#define CKA_DERIVE_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000213UL) - -#define CKA_OTP_FORMAT 0x00000220UL -#define CKA_OTP_LENGTH 0x00000221UL -#define CKA_OTP_TIME_INTERVAL 0x00000222UL -#define CKA_OTP_USER_FRIENDLY_MODE 0x00000223UL -#define CKA_OTP_CHALLENGE_REQUIREMENT 0x00000224UL -#define CKA_OTP_TIME_REQUIREMENT 0x00000225UL -#define CKA_OTP_COUNTER_REQUIREMENT 0x00000226UL -#define CKA_OTP_PIN_REQUIREMENT 0x00000227UL -#define CKA_OTP_COUNTER 0x0000022EUL -#define CKA_OTP_TIME 0x0000022FUL -#define CKA_OTP_USER_IDENTIFIER 0x0000022AUL -#define CKA_OTP_SERVICE_IDENTIFIER 0x0000022BUL -#define CKA_OTP_SERVICE_LOGO 0x0000022CUL -#define 
CKA_OTP_SERVICE_LOGO_TYPE 0x0000022DUL - -#define CKA_GOSTR3410_PARAMS 0x00000250UL -#define CKA_GOSTR3411_PARAMS 0x00000251UL -#define CKA_GOST28147_PARAMS 0x00000252UL - -#define CKA_HW_FEATURE_TYPE 0x00000300UL -#define CKA_RESET_ON_INIT 0x00000301UL -#define CKA_HAS_RESET 0x00000302UL - -#define CKA_PIXEL_X 0x00000400UL -#define CKA_PIXEL_Y 0x00000401UL -#define CKA_RESOLUTION 0x00000402UL -#define CKA_CHAR_ROWS 0x00000403UL -#define CKA_CHAR_COLUMNS 0x00000404UL -#define CKA_COLOR 0x00000405UL -#define CKA_BITS_PER_PIXEL 0x00000406UL -#define CKA_CHAR_SETS 0x00000480UL -#define CKA_ENCODING_METHODS 0x00000481UL -#define CKA_MIME_TYPES 0x00000482UL -#define CKA_MECHANISM_TYPE 0x00000500UL -#define CKA_REQUIRED_CMS_ATTRIBUTES 0x00000501UL -#define CKA_DEFAULT_CMS_ATTRIBUTES 0x00000502UL -#define CKA_SUPPORTED_CMS_ATTRIBUTES 0x00000503UL -#define CKA_ALLOWED_MECHANISMS (CKF_ARRAY_ATTRIBUTE|0x00000600UL) - -#define CKA_VENDOR_DEFINED 0x80000000UL - -/* CK_ATTRIBUTE is a structure that includes the type, length - * and value of an attribute - */ -typedef struct CK_ATTRIBUTE { - CK_ATTRIBUTE_TYPE type; - CK_VOID_PTR pValue; - CK_ULONG ulValueLen; /* in bytes */ -} CK_ATTRIBUTE; - -typedef CK_ATTRIBUTE CK_PTR CK_ATTRIBUTE_PTR; - -/* CK_DATE is a structure that defines a date */ -typedef struct CK_DATE{ - CK_CHAR year[4]; /* the year ("1900" - "9999") */ - CK_CHAR month[2]; /* the month ("01" - "12") */ - CK_CHAR day[2]; /* the day ("01" - "31") */ -} CK_DATE; - - -/* CK_MECHANISM_TYPE is a value that identifies a mechanism - * type - */ -typedef CK_ULONG CK_MECHANISM_TYPE; - -/* the following mechanism types are defined: */ -#define CKM_RSA_PKCS_KEY_PAIR_GEN 0x00000000UL -#define CKM_RSA_PKCS 0x00000001UL -#define CKM_RSA_9796 0x00000002UL -#define CKM_RSA_X_509 0x00000003UL - -#define CKM_MD2_RSA_PKCS 0x00000004UL -#define CKM_MD5_RSA_PKCS 0x00000005UL -#define CKM_SHA1_RSA_PKCS 0x00000006UL - -#define CKM_RIPEMD128_RSA_PKCS 0x00000007UL -#define 
CKM_RIPEMD160_RSA_PKCS 0x00000008UL -#define CKM_RSA_PKCS_OAEP 0x00000009UL - -#define CKM_RSA_X9_31_KEY_PAIR_GEN 0x0000000AUL -#define CKM_RSA_X9_31 0x0000000BUL -#define CKM_SHA1_RSA_X9_31 0x0000000CUL -#define CKM_RSA_PKCS_PSS 0x0000000DUL -#define CKM_SHA1_RSA_PKCS_PSS 0x0000000EUL - -#define CKM_DSA_KEY_PAIR_GEN 0x00000010UL -#define CKM_DSA 0x00000011UL -#define CKM_DSA_SHA1 0x00000012UL -#define CKM_DSA_SHA224 0x00000013UL -#define CKM_DSA_SHA256 0x00000014UL -#define CKM_DSA_SHA384 0x00000015UL -#define CKM_DSA_SHA512 0x00000016UL - -#define CKM_DH_PKCS_KEY_PAIR_GEN 0x00000020UL -#define CKM_DH_PKCS_DERIVE 0x00000021UL - -#define CKM_X9_42_DH_KEY_PAIR_GEN 0x00000030UL -#define CKM_X9_42_DH_DERIVE 0x00000031UL -#define CKM_X9_42_DH_HYBRID_DERIVE 0x00000032UL -#define CKM_X9_42_MQV_DERIVE 0x00000033UL - -#define CKM_SHA256_RSA_PKCS 0x00000040UL -#define CKM_SHA384_RSA_PKCS 0x00000041UL -#define CKM_SHA512_RSA_PKCS 0x00000042UL -#define CKM_SHA256_RSA_PKCS_PSS 0x00000043UL -#define CKM_SHA384_RSA_PKCS_PSS 0x00000044UL -#define CKM_SHA512_RSA_PKCS_PSS 0x00000045UL - -#define CKM_SHA224_RSA_PKCS 0x00000046UL -#define CKM_SHA224_RSA_PKCS_PSS 0x00000047UL - -#define CKM_SHA512_224 0x00000048UL -#define CKM_SHA512_224_HMAC 0x00000049UL -#define CKM_SHA512_224_HMAC_GENERAL 0x0000004AUL -#define CKM_SHA512_224_KEY_DERIVATION 0x0000004BUL -#define CKM_SHA512_256 0x0000004CUL -#define CKM_SHA512_256_HMAC 0x0000004DUL -#define CKM_SHA512_256_HMAC_GENERAL 0x0000004EUL -#define CKM_SHA512_256_KEY_DERIVATION 0x0000004FUL - -#define CKM_SHA512_T 0x00000050UL -#define CKM_SHA512_T_HMAC 0x00000051UL -#define CKM_SHA512_T_HMAC_GENERAL 0x00000052UL -#define CKM_SHA512_T_KEY_DERIVATION 0x00000053UL - -#define CKM_RC2_KEY_GEN 0x00000100UL -#define CKM_RC2_ECB 0x00000101UL -#define CKM_RC2_CBC 0x00000102UL -#define CKM_RC2_MAC 0x00000103UL - -#define CKM_RC2_MAC_GENERAL 0x00000104UL -#define CKM_RC2_CBC_PAD 0x00000105UL - -#define CKM_RC4_KEY_GEN 0x00000110UL -#define CKM_RC4 
0x00000111UL -#define CKM_DES_KEY_GEN 0x00000120UL -#define CKM_DES_ECB 0x00000121UL -#define CKM_DES_CBC 0x00000122UL -#define CKM_DES_MAC 0x00000123UL - -#define CKM_DES_MAC_GENERAL 0x00000124UL -#define CKM_DES_CBC_PAD 0x00000125UL - -#define CKM_DES2_KEY_GEN 0x00000130UL -#define CKM_DES3_KEY_GEN 0x00000131UL -#define CKM_DES3_ECB 0x00000132UL -#define CKM_DES3_CBC 0x00000133UL -#define CKM_DES3_MAC 0x00000134UL - -#define CKM_DES3_MAC_GENERAL 0x00000135UL -#define CKM_DES3_CBC_PAD 0x00000136UL -#define CKM_DES3_CMAC_GENERAL 0x00000137UL -#define CKM_DES3_CMAC 0x00000138UL -#define CKM_CDMF_KEY_GEN 0x00000140UL -#define CKM_CDMF_ECB 0x00000141UL -#define CKM_CDMF_CBC 0x00000142UL -#define CKM_CDMF_MAC 0x00000143UL -#define CKM_CDMF_MAC_GENERAL 0x00000144UL -#define CKM_CDMF_CBC_PAD 0x00000145UL - -#define CKM_DES_OFB64 0x00000150UL -#define CKM_DES_OFB8 0x00000151UL -#define CKM_DES_CFB64 0x00000152UL -#define CKM_DES_CFB8 0x00000153UL - -#define CKM_MD2 0x00000200UL - -#define CKM_MD2_HMAC 0x00000201UL -#define CKM_MD2_HMAC_GENERAL 0x00000202UL - -#define CKM_MD5 0x00000210UL - -#define CKM_MD5_HMAC 0x00000211UL -#define CKM_MD5_HMAC_GENERAL 0x00000212UL - -#define CKM_SHA_1 0x00000220UL - -#define CKM_SHA_1_HMAC 0x00000221UL -#define CKM_SHA_1_HMAC_GENERAL 0x00000222UL - -#define CKM_RIPEMD128 0x00000230UL -#define CKM_RIPEMD128_HMAC 0x00000231UL -#define CKM_RIPEMD128_HMAC_GENERAL 0x00000232UL -#define CKM_RIPEMD160 0x00000240UL -#define CKM_RIPEMD160_HMAC 0x00000241UL -#define CKM_RIPEMD160_HMAC_GENERAL 0x00000242UL - -#define CKM_SHA256 0x00000250UL -#define CKM_SHA256_HMAC 0x00000251UL -#define CKM_SHA256_HMAC_GENERAL 0x00000252UL -#define CKM_SHA224 0x00000255UL -#define CKM_SHA224_HMAC 0x00000256UL -#define CKM_SHA224_HMAC_GENERAL 0x00000257UL -#define CKM_SHA384 0x00000260UL -#define CKM_SHA384_HMAC 0x00000261UL -#define CKM_SHA384_HMAC_GENERAL 0x00000262UL -#define CKM_SHA512 0x00000270UL -#define CKM_SHA512_HMAC 0x00000271UL -#define 
CKM_SHA512_HMAC_GENERAL 0x00000272UL -#define CKM_SECURID_KEY_GEN 0x00000280UL -#define CKM_SECURID 0x00000282UL -#define CKM_HOTP_KEY_GEN 0x00000290UL -#define CKM_HOTP 0x00000291UL -#define CKM_ACTI 0x000002A0UL -#define CKM_ACTI_KEY_GEN 0x000002A1UL - -#define CKM_CAST_KEY_GEN 0x00000300UL -#define CKM_CAST_ECB 0x00000301UL -#define CKM_CAST_CBC 0x00000302UL -#define CKM_CAST_MAC 0x00000303UL -#define CKM_CAST_MAC_GENERAL 0x00000304UL -#define CKM_CAST_CBC_PAD 0x00000305UL -#define CKM_CAST3_KEY_GEN 0x00000310UL -#define CKM_CAST3_ECB 0x00000311UL -#define CKM_CAST3_CBC 0x00000312UL -#define CKM_CAST3_MAC 0x00000313UL -#define CKM_CAST3_MAC_GENERAL 0x00000314UL -#define CKM_CAST3_CBC_PAD 0x00000315UL -/* Note that CAST128 and CAST5 are the same algorithm */ -#define CKM_CAST5_KEY_GEN 0x00000320UL -#define CKM_CAST128_KEY_GEN 0x00000320UL -#define CKM_CAST5_ECB 0x00000321UL -#define CKM_CAST128_ECB 0x00000321UL -#define CKM_CAST5_CBC 0x00000322UL /* Deprecated */ -#define CKM_CAST128_CBC 0x00000322UL -#define CKM_CAST5_MAC 0x00000323UL /* Deprecated */ -#define CKM_CAST128_MAC 0x00000323UL -#define CKM_CAST5_MAC_GENERAL 0x00000324UL /* Deprecated */ -#define CKM_CAST128_MAC_GENERAL 0x00000324UL -#define CKM_CAST5_CBC_PAD 0x00000325UL /* Deprecated */ -#define CKM_CAST128_CBC_PAD 0x00000325UL -#define CKM_RC5_KEY_GEN 0x00000330UL -#define CKM_RC5_ECB 0x00000331UL -#define CKM_RC5_CBC 0x00000332UL -#define CKM_RC5_MAC 0x00000333UL -#define CKM_RC5_MAC_GENERAL 0x00000334UL -#define CKM_RC5_CBC_PAD 0x00000335UL -#define CKM_IDEA_KEY_GEN 0x00000340UL -#define CKM_IDEA_ECB 0x00000341UL -#define CKM_IDEA_CBC 0x00000342UL -#define CKM_IDEA_MAC 0x00000343UL -#define CKM_IDEA_MAC_GENERAL 0x00000344UL -#define CKM_IDEA_CBC_PAD 0x00000345UL -#define CKM_GENERIC_SECRET_KEY_GEN 0x00000350UL -#define CKM_CONCATENATE_BASE_AND_KEY 0x00000360UL -#define CKM_CONCATENATE_BASE_AND_DATA 0x00000362UL -#define CKM_CONCATENATE_DATA_AND_BASE 0x00000363UL -#define CKM_XOR_BASE_AND_DATA 
0x00000364UL -#define CKM_EXTRACT_KEY_FROM_KEY 0x00000365UL -#define CKM_SSL3_PRE_MASTER_KEY_GEN 0x00000370UL -#define CKM_SSL3_MASTER_KEY_DERIVE 0x00000371UL -#define CKM_SSL3_KEY_AND_MAC_DERIVE 0x00000372UL - -#define CKM_SSL3_MASTER_KEY_DERIVE_DH 0x00000373UL -#define CKM_TLS_PRE_MASTER_KEY_GEN 0x00000374UL -#define CKM_TLS_MASTER_KEY_DERIVE 0x00000375UL -#define CKM_TLS_KEY_AND_MAC_DERIVE 0x00000376UL -#define CKM_TLS_MASTER_KEY_DERIVE_DH 0x00000377UL - -#define CKM_TLS_PRF 0x00000378UL - -#define CKM_SSL3_MD5_MAC 0x00000380UL -#define CKM_SSL3_SHA1_MAC 0x00000381UL -#define CKM_MD5_KEY_DERIVATION 0x00000390UL -#define CKM_MD2_KEY_DERIVATION 0x00000391UL -#define CKM_SHA1_KEY_DERIVATION 0x00000392UL - -#define CKM_SHA256_KEY_DERIVATION 0x00000393UL -#define CKM_SHA384_KEY_DERIVATION 0x00000394UL -#define CKM_SHA512_KEY_DERIVATION 0x00000395UL -#define CKM_SHA224_KEY_DERIVATION 0x00000396UL - -#define CKM_PBE_MD2_DES_CBC 0x000003A0UL -#define CKM_PBE_MD5_DES_CBC 0x000003A1UL -#define CKM_PBE_MD5_CAST_CBC 0x000003A2UL -#define CKM_PBE_MD5_CAST3_CBC 0x000003A3UL -#define CKM_PBE_MD5_CAST5_CBC 0x000003A4UL /* Deprecated */ -#define CKM_PBE_MD5_CAST128_CBC 0x000003A4UL -#define CKM_PBE_SHA1_CAST5_CBC 0x000003A5UL /* Deprecated */ -#define CKM_PBE_SHA1_CAST128_CBC 0x000003A5UL -#define CKM_PBE_SHA1_RC4_128 0x000003A6UL -#define CKM_PBE_SHA1_RC4_40 0x000003A7UL -#define CKM_PBE_SHA1_DES3_EDE_CBC 0x000003A8UL -#define CKM_PBE_SHA1_DES2_EDE_CBC 0x000003A9UL -#define CKM_PBE_SHA1_RC2_128_CBC 0x000003AAUL -#define CKM_PBE_SHA1_RC2_40_CBC 0x000003ABUL - -#define CKM_PKCS5_PBKD2 0x000003B0UL - -#define CKM_PBA_SHA1_WITH_SHA1_HMAC 0x000003C0UL - -#define CKM_WTLS_PRE_MASTER_KEY_GEN 0x000003D0UL -#define CKM_WTLS_MASTER_KEY_DERIVE 0x000003D1UL -#define CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC 0x000003D2UL -#define CKM_WTLS_PRF 0x000003D3UL -#define CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE 0x000003D4UL -#define CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE 0x000003D5UL - -#define CKM_TLS10_MAC_SERVER 
0x000003D6UL -#define CKM_TLS10_MAC_CLIENT 0x000003D7UL -#define CKM_TLS12_MAC 0x000003D8UL -#define CKM_TLS12_KDF 0x000003D9UL -#define CKM_TLS12_MASTER_KEY_DERIVE 0x000003E0UL -#define CKM_TLS12_KEY_AND_MAC_DERIVE 0x000003E1UL -#define CKM_TLS12_MASTER_KEY_DERIVE_DH 0x000003E2UL -#define CKM_TLS12_KEY_SAFE_DERIVE 0x000003E3UL -#define CKM_TLS_MAC 0x000003E4UL -#define CKM_TLS_KDF 0x000003E5UL - -#define CKM_KEY_WRAP_LYNKS 0x00000400UL -#define CKM_KEY_WRAP_SET_OAEP 0x00000401UL - -#define CKM_CMS_SIG 0x00000500UL -#define CKM_KIP_DERIVE 0x00000510UL -#define CKM_KIP_WRAP 0x00000511UL -#define CKM_KIP_MAC 0x00000512UL - -#define CKM_CAMELLIA_KEY_GEN 0x00000550UL -#define CKM_CAMELLIA_ECB 0x00000551UL -#define CKM_CAMELLIA_CBC 0x00000552UL -#define CKM_CAMELLIA_MAC 0x00000553UL -#define CKM_CAMELLIA_MAC_GENERAL 0x00000554UL -#define CKM_CAMELLIA_CBC_PAD 0x00000555UL -#define CKM_CAMELLIA_ECB_ENCRYPT_DATA 0x00000556UL -#define CKM_CAMELLIA_CBC_ENCRYPT_DATA 0x00000557UL -#define CKM_CAMELLIA_CTR 0x00000558UL - -#define CKM_ARIA_KEY_GEN 0x00000560UL -#define CKM_ARIA_ECB 0x00000561UL -#define CKM_ARIA_CBC 0x00000562UL -#define CKM_ARIA_MAC 0x00000563UL -#define CKM_ARIA_MAC_GENERAL 0x00000564UL -#define CKM_ARIA_CBC_PAD 0x00000565UL -#define CKM_ARIA_ECB_ENCRYPT_DATA 0x00000566UL -#define CKM_ARIA_CBC_ENCRYPT_DATA 0x00000567UL - -#define CKM_SEED_KEY_GEN 0x00000650UL -#define CKM_SEED_ECB 0x00000651UL -#define CKM_SEED_CBC 0x00000652UL -#define CKM_SEED_MAC 0x00000653UL -#define CKM_SEED_MAC_GENERAL 0x00000654UL -#define CKM_SEED_CBC_PAD 0x00000655UL -#define CKM_SEED_ECB_ENCRYPT_DATA 0x00000656UL -#define CKM_SEED_CBC_ENCRYPT_DATA 0x00000657UL - -#define CKM_SKIPJACK_KEY_GEN 0x00001000UL -#define CKM_SKIPJACK_ECB64 0x00001001UL -#define CKM_SKIPJACK_CBC64 0x00001002UL -#define CKM_SKIPJACK_OFB64 0x00001003UL -#define CKM_SKIPJACK_CFB64 0x00001004UL -#define CKM_SKIPJACK_CFB32 0x00001005UL -#define CKM_SKIPJACK_CFB16 0x00001006UL -#define CKM_SKIPJACK_CFB8 
0x00001007UL -#define CKM_SKIPJACK_WRAP 0x00001008UL -#define CKM_SKIPJACK_PRIVATE_WRAP 0x00001009UL -#define CKM_SKIPJACK_RELAYX 0x0000100aUL -#define CKM_KEA_KEY_PAIR_GEN 0x00001010UL -#define CKM_KEA_KEY_DERIVE 0x00001011UL -#define CKM_KEA_DERIVE 0x00001012UL -#define CKM_FORTEZZA_TIMESTAMP 0x00001020UL -#define CKM_BATON_KEY_GEN 0x00001030UL -#define CKM_BATON_ECB128 0x00001031UL -#define CKM_BATON_ECB96 0x00001032UL -#define CKM_BATON_CBC128 0x00001033UL -#define CKM_BATON_COUNTER 0x00001034UL -#define CKM_BATON_SHUFFLE 0x00001035UL -#define CKM_BATON_WRAP 0x00001036UL - -#define CKM_ECDSA_KEY_PAIR_GEN 0x00001040UL /* Deprecated */ -#define CKM_EC_KEY_PAIR_GEN 0x00001040UL - -#define CKM_ECDSA 0x00001041UL -#define CKM_ECDSA_SHA1 0x00001042UL -#define CKM_ECDSA_SHA224 0x00001043UL -#define CKM_ECDSA_SHA256 0x00001044UL -#define CKM_ECDSA_SHA384 0x00001045UL -#define CKM_ECDSA_SHA512 0x00001046UL - -#define CKM_ECDH1_DERIVE 0x00001050UL -#define CKM_ECDH1_COFACTOR_DERIVE 0x00001051UL -#define CKM_ECMQV_DERIVE 0x00001052UL - -#define CKM_ECDH_AES_KEY_WRAP 0x00001053UL -#define CKM_RSA_AES_KEY_WRAP 0x00001054UL - -#define CKM_JUNIPER_KEY_GEN 0x00001060UL -#define CKM_JUNIPER_ECB128 0x00001061UL -#define CKM_JUNIPER_CBC128 0x00001062UL -#define CKM_JUNIPER_COUNTER 0x00001063UL -#define CKM_JUNIPER_SHUFFLE 0x00001064UL -#define CKM_JUNIPER_WRAP 0x00001065UL -#define CKM_FASTHASH 0x00001070UL - -#define CKM_AES_KEY_GEN 0x00001080UL -#define CKM_AES_ECB 0x00001081UL -#define CKM_AES_CBC 0x00001082UL -#define CKM_AES_MAC 0x00001083UL -#define CKM_AES_MAC_GENERAL 0x00001084UL -#define CKM_AES_CBC_PAD 0x00001085UL -#define CKM_AES_CTR 0x00001086UL -#define CKM_AES_GCM 0x00001087UL -#define CKM_AES_CCM 0x00001088UL -#define CKM_AES_CTS 0x00001089UL -#define CKM_AES_CMAC 0x0000108AUL -#define CKM_AES_CMAC_GENERAL 0x0000108BUL - -#define CKM_AES_XCBC_MAC 0x0000108CUL -#define CKM_AES_XCBC_MAC_96 0x0000108DUL -#define CKM_AES_GMAC 0x0000108EUL - -#define 
CKM_BLOWFISH_KEY_GEN 0x00001090UL -#define CKM_BLOWFISH_CBC 0x00001091UL -#define CKM_TWOFISH_KEY_GEN 0x00001092UL -#define CKM_TWOFISH_CBC 0x00001093UL -#define CKM_BLOWFISH_CBC_PAD 0x00001094UL -#define CKM_TWOFISH_CBC_PAD 0x00001095UL - -#define CKM_DES_ECB_ENCRYPT_DATA 0x00001100UL -#define CKM_DES_CBC_ENCRYPT_DATA 0x00001101UL -#define CKM_DES3_ECB_ENCRYPT_DATA 0x00001102UL -#define CKM_DES3_CBC_ENCRYPT_DATA 0x00001103UL -#define CKM_AES_ECB_ENCRYPT_DATA 0x00001104UL -#define CKM_AES_CBC_ENCRYPT_DATA 0x00001105UL - -#define CKM_GOSTR3410_KEY_PAIR_GEN 0x00001200UL -#define CKM_GOSTR3410 0x00001201UL -#define CKM_GOSTR3410_WITH_GOSTR3411 0x00001202UL -#define CKM_GOSTR3410_KEY_WRAP 0x00001203UL -#define CKM_GOSTR3410_DERIVE 0x00001204UL -#define CKM_GOSTR3411 0x00001210UL -#define CKM_GOSTR3411_HMAC 0x00001211UL -#define CKM_GOST28147_KEY_GEN 0x00001220UL -#define CKM_GOST28147_ECB 0x00001221UL -#define CKM_GOST28147 0x00001222UL -#define CKM_GOST28147_MAC 0x00001223UL -#define CKM_GOST28147_KEY_WRAP 0x00001224UL - -#define CKM_DSA_PARAMETER_GEN 0x00002000UL -#define CKM_DH_PKCS_PARAMETER_GEN 0x00002001UL -#define CKM_X9_42_DH_PARAMETER_GEN 0x00002002UL -#define CKM_DSA_PROBABLISTIC_PARAMETER_GEN 0x00002003UL -#define CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN 0x00002004UL - -#define CKM_AES_OFB 0x00002104UL -#define CKM_AES_CFB64 0x00002105UL -#define CKM_AES_CFB8 0x00002106UL -#define CKM_AES_CFB128 0x00002107UL - -#define CKM_AES_CFB1 0x00002108UL -#define CKM_AES_KEY_WRAP 0x00002109UL /* WAS: 0x00001090 */ -#define CKM_AES_KEY_WRAP_PAD 0x0000210AUL /* WAS: 0x00001091 */ - -#define CKM_RSA_PKCS_TPM_1_1 0x00004001UL -#define CKM_RSA_PKCS_OAEP_TPM_1_1 0x00004002UL - -#define CKM_VENDOR_DEFINED 0x80000000UL - -typedef CK_MECHANISM_TYPE CK_PTR CK_MECHANISM_TYPE_PTR; - - -/* CK_MECHANISM is a structure that specifies a particular - * mechanism - */ -typedef struct CK_MECHANISM { - CK_MECHANISM_TYPE mechanism; - CK_VOID_PTR pParameter; - CK_ULONG ulParameterLen; /* in 
bytes */ -} CK_MECHANISM; - -typedef CK_MECHANISM CK_PTR CK_MECHANISM_PTR; - - -/* CK_MECHANISM_INFO provides information about a particular - * mechanism - */ -typedef struct CK_MECHANISM_INFO { - CK_ULONG ulMinKeySize; - CK_ULONG ulMaxKeySize; - CK_FLAGS flags; -} CK_MECHANISM_INFO; - -/* The flags are defined as follows: - * Bit Flag Mask Meaning */ -#define CKF_HW 0x00000001UL /* performed by HW */ - -/* Specify whether or not a mechanism can be used for a particular task */ -#define CKF_ENCRYPT 0x00000100UL -#define CKF_DECRYPT 0x00000200UL -#define CKF_DIGEST 0x00000400UL -#define CKF_SIGN 0x00000800UL -#define CKF_SIGN_RECOVER 0x00001000UL -#define CKF_VERIFY 0x00002000UL -#define CKF_VERIFY_RECOVER 0x00004000UL -#define CKF_GENERATE 0x00008000UL -#define CKF_GENERATE_KEY_PAIR 0x00010000UL -#define CKF_WRAP 0x00020000UL -#define CKF_UNWRAP 0x00040000UL -#define CKF_DERIVE 0x00080000UL - -/* Describe a token's EC capabilities not available in mechanism - * information. - */ -#define CKF_EC_F_P 0x00100000UL -#define CKF_EC_F_2M 0x00200000UL -#define CKF_EC_ECPARAMETERS 0x00400000UL -#define CKF_EC_NAMEDCURVE 0x00800000UL -#define CKF_EC_UNCOMPRESS 0x01000000UL -#define CKF_EC_COMPRESS 0x02000000UL - -#define CKF_EXTENSION 0x80000000UL - -typedef CK_MECHANISM_INFO CK_PTR CK_MECHANISM_INFO_PTR; - -/* CK_RV is a value that identifies the return value of a - * Cryptoki function - */ -typedef CK_ULONG CK_RV; - -#define CKR_OK 0x00000000UL -#define CKR_CANCEL 0x00000001UL -#define CKR_HOST_MEMORY 0x00000002UL -#define CKR_SLOT_ID_INVALID 0x00000003UL - -#define CKR_GENERAL_ERROR 0x00000005UL -#define CKR_FUNCTION_FAILED 0x00000006UL - -#define CKR_ARGUMENTS_BAD 0x00000007UL -#define CKR_NO_EVENT 0x00000008UL -#define CKR_NEED_TO_CREATE_THREADS 0x00000009UL -#define CKR_CANT_LOCK 0x0000000AUL - -#define CKR_ATTRIBUTE_READ_ONLY 0x00000010UL -#define CKR_ATTRIBUTE_SENSITIVE 0x00000011UL -#define CKR_ATTRIBUTE_TYPE_INVALID 0x00000012UL -#define 
CKR_ATTRIBUTE_VALUE_INVALID 0x00000013UL - -#define CKR_ACTION_PROHIBITED 0x0000001BUL - -#define CKR_DATA_INVALID 0x00000020UL -#define CKR_DATA_LEN_RANGE 0x00000021UL -#define CKR_DEVICE_ERROR 0x00000030UL -#define CKR_DEVICE_MEMORY 0x00000031UL -#define CKR_DEVICE_REMOVED 0x00000032UL -#define CKR_ENCRYPTED_DATA_INVALID 0x00000040UL -#define CKR_ENCRYPTED_DATA_LEN_RANGE 0x00000041UL -#define CKR_FUNCTION_CANCELED 0x00000050UL -#define CKR_FUNCTION_NOT_PARALLEL 0x00000051UL - -#define CKR_FUNCTION_NOT_SUPPORTED 0x00000054UL - -#define CKR_KEY_HANDLE_INVALID 0x00000060UL - -#define CKR_KEY_SIZE_RANGE 0x00000062UL -#define CKR_KEY_TYPE_INCONSISTENT 0x00000063UL - -#define CKR_KEY_NOT_NEEDED 0x00000064UL -#define CKR_KEY_CHANGED 0x00000065UL -#define CKR_KEY_NEEDED 0x00000066UL -#define CKR_KEY_INDIGESTIBLE 0x00000067UL -#define CKR_KEY_FUNCTION_NOT_PERMITTED 0x00000068UL -#define CKR_KEY_NOT_WRAPPABLE 0x00000069UL -#define CKR_KEY_UNEXTRACTABLE 0x0000006AUL - -#define CKR_MECHANISM_INVALID 0x00000070UL -#define CKR_MECHANISM_PARAM_INVALID 0x00000071UL - -#define CKR_OBJECT_HANDLE_INVALID 0x00000082UL -#define CKR_OPERATION_ACTIVE 0x00000090UL -#define CKR_OPERATION_NOT_INITIALIZED 0x00000091UL -#define CKR_PIN_INCORRECT 0x000000A0UL -#define CKR_PIN_INVALID 0x000000A1UL -#define CKR_PIN_LEN_RANGE 0x000000A2UL - -#define CKR_PIN_EXPIRED 0x000000A3UL -#define CKR_PIN_LOCKED 0x000000A4UL - -#define CKR_SESSION_CLOSED 0x000000B0UL -#define CKR_SESSION_COUNT 0x000000B1UL -#define CKR_SESSION_HANDLE_INVALID 0x000000B3UL -#define CKR_SESSION_PARALLEL_NOT_SUPPORTED 0x000000B4UL -#define CKR_SESSION_READ_ONLY 0x000000B5UL -#define CKR_SESSION_EXISTS 0x000000B6UL - -#define CKR_SESSION_READ_ONLY_EXISTS 0x000000B7UL -#define CKR_SESSION_READ_WRITE_SO_EXISTS 0x000000B8UL - -#define CKR_SIGNATURE_INVALID 0x000000C0UL -#define CKR_SIGNATURE_LEN_RANGE 0x000000C1UL -#define CKR_TEMPLATE_INCOMPLETE 0x000000D0UL -#define CKR_TEMPLATE_INCONSISTENT 0x000000D1UL -#define 
CKR_TOKEN_NOT_PRESENT 0x000000E0UL -#define CKR_TOKEN_NOT_RECOGNIZED 0x000000E1UL -#define CKR_TOKEN_WRITE_PROTECTED 0x000000E2UL -#define CKR_UNWRAPPING_KEY_HANDLE_INVALID 0x000000F0UL -#define CKR_UNWRAPPING_KEY_SIZE_RANGE 0x000000F1UL -#define CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x000000F2UL -#define CKR_USER_ALREADY_LOGGED_IN 0x00000100UL -#define CKR_USER_NOT_LOGGED_IN 0x00000101UL -#define CKR_USER_PIN_NOT_INITIALIZED 0x00000102UL -#define CKR_USER_TYPE_INVALID 0x00000103UL - -#define CKR_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000104UL -#define CKR_USER_TOO_MANY_TYPES 0x00000105UL - -#define CKR_WRAPPED_KEY_INVALID 0x00000110UL -#define CKR_WRAPPED_KEY_LEN_RANGE 0x00000112UL -#define CKR_WRAPPING_KEY_HANDLE_INVALID 0x00000113UL -#define CKR_WRAPPING_KEY_SIZE_RANGE 0x00000114UL -#define CKR_WRAPPING_KEY_TYPE_INCONSISTENT 0x00000115UL -#define CKR_RANDOM_SEED_NOT_SUPPORTED 0x00000120UL - -#define CKR_RANDOM_NO_RNG 0x00000121UL - -#define CKR_DOMAIN_PARAMS_INVALID 0x00000130UL - -#define CKR_CURVE_NOT_SUPPORTED 0x00000140UL - -#define CKR_BUFFER_TOO_SMALL 0x00000150UL -#define CKR_SAVED_STATE_INVALID 0x00000160UL -#define CKR_INFORMATION_SENSITIVE 0x00000170UL -#define CKR_STATE_UNSAVEABLE 0x00000180UL - -#define CKR_CRYPTOKI_NOT_INITIALIZED 0x00000190UL -#define CKR_CRYPTOKI_ALREADY_INITIALIZED 0x00000191UL -#define CKR_MUTEX_BAD 0x000001A0UL -#define CKR_MUTEX_NOT_LOCKED 0x000001A1UL - -#define CKR_NEW_PIN_MODE 0x000001B0UL -#define CKR_NEXT_OTP 0x000001B1UL - -#define CKR_EXCEEDED_MAX_ITERATIONS 0x000001B5UL -#define CKR_FIPS_SELF_TEST_FAILED 0x000001B6UL -#define CKR_LIBRARY_LOAD_FAILED 0x000001B7UL -#define CKR_PIN_TOO_WEAK 0x000001B8UL -#define CKR_PUBLIC_KEY_INVALID 0x000001B9UL - -#define CKR_FUNCTION_REJECTED 0x00000200UL - -#define CKR_VENDOR_DEFINED 0x80000000UL - - -/* CK_NOTIFY is an application callback that processes events */ -typedef CK_CALLBACK_FUNCTION(CK_RV, CK_NOTIFY)( - CK_SESSION_HANDLE hSession, /* the session's handle */ - CK_NOTIFICATION 
event, - CK_VOID_PTR pApplication /* passed to C_OpenSession */ -); - - -/* CK_FUNCTION_LIST is a structure holding a Cryptoki spec - * version and pointers of appropriate types to all the - * Cryptoki functions - */ -typedef struct CK_FUNCTION_LIST CK_FUNCTION_LIST; - -typedef CK_FUNCTION_LIST CK_PTR CK_FUNCTION_LIST_PTR; - -typedef CK_FUNCTION_LIST_PTR CK_PTR CK_FUNCTION_LIST_PTR_PTR; - - -/* CK_CREATEMUTEX is an application callback for creating a - * mutex object - */ -typedef CK_CALLBACK_FUNCTION(CK_RV, CK_CREATEMUTEX)( - CK_VOID_PTR_PTR ppMutex /* location to receive ptr to mutex */ -); - - -/* CK_DESTROYMUTEX is an application callback for destroying a - * mutex object - */ -typedef CK_CALLBACK_FUNCTION(CK_RV, CK_DESTROYMUTEX)( - CK_VOID_PTR pMutex /* pointer to mutex */ -); - - -/* CK_LOCKMUTEX is an application callback for locking a mutex */ -typedef CK_CALLBACK_FUNCTION(CK_RV, CK_LOCKMUTEX)( - CK_VOID_PTR pMutex /* pointer to mutex */ -); - - -/* CK_UNLOCKMUTEX is an application callback for unlocking a - * mutex - */ -typedef CK_CALLBACK_FUNCTION(CK_RV, CK_UNLOCKMUTEX)( - CK_VOID_PTR pMutex /* pointer to mutex */ -); - - -/* CK_C_INITIALIZE_ARGS provides the optional arguments to - * C_Initialize - */ -typedef struct CK_C_INITIALIZE_ARGS { - CK_CREATEMUTEX CreateMutex; - CK_DESTROYMUTEX DestroyMutex; - CK_LOCKMUTEX LockMutex; - CK_UNLOCKMUTEX UnlockMutex; - CK_FLAGS flags; - CK_VOID_PTR pReserved; -} CK_C_INITIALIZE_ARGS; - -/* flags: bit flags that provide capabilities of the slot - * Bit Flag Mask Meaning - */ -#define CKF_LIBRARY_CANT_CREATE_OS_THREADS 0x00000001UL -#define CKF_OS_LOCKING_OK 0x00000002UL - -typedef CK_C_INITIALIZE_ARGS CK_PTR CK_C_INITIALIZE_ARGS_PTR; - - -/* additional flags for parameters to functions */ - -/* CKF_DONT_BLOCK is for the function C_WaitForSlotEvent */ -#define CKF_DONT_BLOCK 1 - -/* CK_RSA_PKCS_MGF_TYPE is used to indicate the Message - * Generation Function (MGF) applied to a message block when - * formatting a 
message block for the PKCS #1 OAEP encryption - * scheme. - */ -typedef CK_ULONG CK_RSA_PKCS_MGF_TYPE; - -typedef CK_RSA_PKCS_MGF_TYPE CK_PTR CK_RSA_PKCS_MGF_TYPE_PTR; - -/* The following MGFs are defined */ -#define CKG_MGF1_SHA1 0x00000001UL -#define CKG_MGF1_SHA256 0x00000002UL -#define CKG_MGF1_SHA384 0x00000003UL -#define CKG_MGF1_SHA512 0x00000004UL -#define CKG_MGF1_SHA224 0x00000005UL - -/* CK_RSA_PKCS_OAEP_SOURCE_TYPE is used to indicate the source - * of the encoding parameter when formatting a message block - * for the PKCS #1 OAEP encryption scheme. - */ -typedef CK_ULONG CK_RSA_PKCS_OAEP_SOURCE_TYPE; - -typedef CK_RSA_PKCS_OAEP_SOURCE_TYPE CK_PTR CK_RSA_PKCS_OAEP_SOURCE_TYPE_PTR; - -/* The following encoding parameter sources are defined */ -#define CKZ_DATA_SPECIFIED 0x00000001UL - -/* CK_RSA_PKCS_OAEP_PARAMS provides the parameters to the - * CKM_RSA_PKCS_OAEP mechanism. - */ -typedef struct CK_RSA_PKCS_OAEP_PARAMS { - CK_MECHANISM_TYPE hashAlg; - CK_RSA_PKCS_MGF_TYPE mgf; - CK_RSA_PKCS_OAEP_SOURCE_TYPE source; - CK_VOID_PTR pSourceData; - CK_ULONG ulSourceDataLen; -} CK_RSA_PKCS_OAEP_PARAMS; - -typedef CK_RSA_PKCS_OAEP_PARAMS CK_PTR CK_RSA_PKCS_OAEP_PARAMS_PTR; - -/* CK_RSA_PKCS_PSS_PARAMS provides the parameters to the - * CKM_RSA_PKCS_PSS mechanism(s). 
- */ -typedef struct CK_RSA_PKCS_PSS_PARAMS { - CK_MECHANISM_TYPE hashAlg; - CK_RSA_PKCS_MGF_TYPE mgf; - CK_ULONG sLen; -} CK_RSA_PKCS_PSS_PARAMS; - -typedef CK_RSA_PKCS_PSS_PARAMS CK_PTR CK_RSA_PKCS_PSS_PARAMS_PTR; - -typedef CK_ULONG CK_EC_KDF_TYPE; - -/* The following EC Key Derivation Functions are defined */ -#define CKD_NULL 0x00000001UL -#define CKD_SHA1_KDF 0x00000002UL - -/* The following X9.42 DH key derivation functions are defined */ -#define CKD_SHA1_KDF_ASN1 0x00000003UL -#define CKD_SHA1_KDF_CONCATENATE 0x00000004UL -#define CKD_SHA224_KDF 0x00000005UL -#define CKD_SHA256_KDF 0x00000006UL -#define CKD_SHA384_KDF 0x00000007UL -#define CKD_SHA512_KDF 0x00000008UL -#define CKD_CPDIVERSIFY_KDF 0x00000009UL - - -/* CK_ECDH1_DERIVE_PARAMS provides the parameters to the - * CKM_ECDH1_DERIVE and CKM_ECDH1_COFACTOR_DERIVE mechanisms, - * where each party contributes one key pair. - */ -typedef struct CK_ECDH1_DERIVE_PARAMS { - CK_EC_KDF_TYPE kdf; - CK_ULONG ulSharedDataLen; - CK_BYTE_PTR pSharedData; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pPublicData; -} CK_ECDH1_DERIVE_PARAMS; - -typedef CK_ECDH1_DERIVE_PARAMS CK_PTR CK_ECDH1_DERIVE_PARAMS_PTR; - -/* - * CK_ECDH2_DERIVE_PARAMS provides the parameters to the - * CKM_ECMQV_DERIVE mechanism, where each party contributes two key pairs. 
- */ -typedef struct CK_ECDH2_DERIVE_PARAMS { - CK_EC_KDF_TYPE kdf; - CK_ULONG ulSharedDataLen; - CK_BYTE_PTR pSharedData; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pPublicData; - CK_ULONG ulPrivateDataLen; - CK_OBJECT_HANDLE hPrivateData; - CK_ULONG ulPublicDataLen2; - CK_BYTE_PTR pPublicData2; -} CK_ECDH2_DERIVE_PARAMS; - -typedef CK_ECDH2_DERIVE_PARAMS CK_PTR CK_ECDH2_DERIVE_PARAMS_PTR; - -typedef struct CK_ECMQV_DERIVE_PARAMS { - CK_EC_KDF_TYPE kdf; - CK_ULONG ulSharedDataLen; - CK_BYTE_PTR pSharedData; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pPublicData; - CK_ULONG ulPrivateDataLen; - CK_OBJECT_HANDLE hPrivateData; - CK_ULONG ulPublicDataLen2; - CK_BYTE_PTR pPublicData2; - CK_OBJECT_HANDLE publicKey; -} CK_ECMQV_DERIVE_PARAMS; - -typedef CK_ECMQV_DERIVE_PARAMS CK_PTR CK_ECMQV_DERIVE_PARAMS_PTR; - -/* Typedefs and defines for the CKM_X9_42_DH_KEY_PAIR_GEN and the - * CKM_X9_42_DH_PARAMETER_GEN mechanisms - */ -typedef CK_ULONG CK_X9_42_DH_KDF_TYPE; -typedef CK_X9_42_DH_KDF_TYPE CK_PTR CK_X9_42_DH_KDF_TYPE_PTR; - -/* CK_X9_42_DH1_DERIVE_PARAMS provides the parameters to the - * CKM_X9_42_DH_DERIVE key derivation mechanism, where each party - * contributes one key pair - */ -typedef struct CK_X9_42_DH1_DERIVE_PARAMS { - CK_X9_42_DH_KDF_TYPE kdf; - CK_ULONG ulOtherInfoLen; - CK_BYTE_PTR pOtherInfo; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pPublicData; -} CK_X9_42_DH1_DERIVE_PARAMS; - -typedef struct CK_X9_42_DH1_DERIVE_PARAMS CK_PTR CK_X9_42_DH1_DERIVE_PARAMS_PTR; - -/* CK_X9_42_DH2_DERIVE_PARAMS provides the parameters to the - * CKM_X9_42_DH_HYBRID_DERIVE and CKM_X9_42_MQV_DERIVE key derivation - * mechanisms, where each party contributes two key pairs - */ -typedef struct CK_X9_42_DH2_DERIVE_PARAMS { - CK_X9_42_DH_KDF_TYPE kdf; - CK_ULONG ulOtherInfoLen; - CK_BYTE_PTR pOtherInfo; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pPublicData; - CK_ULONG ulPrivateDataLen; - CK_OBJECT_HANDLE hPrivateData; - CK_ULONG ulPublicDataLen2; - CK_BYTE_PTR pPublicData2; -} 
CK_X9_42_DH2_DERIVE_PARAMS; - -typedef CK_X9_42_DH2_DERIVE_PARAMS CK_PTR CK_X9_42_DH2_DERIVE_PARAMS_PTR; - -typedef struct CK_X9_42_MQV_DERIVE_PARAMS { - CK_X9_42_DH_KDF_TYPE kdf; - CK_ULONG ulOtherInfoLen; - CK_BYTE_PTR pOtherInfo; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pPublicData; - CK_ULONG ulPrivateDataLen; - CK_OBJECT_HANDLE hPrivateData; - CK_ULONG ulPublicDataLen2; - CK_BYTE_PTR pPublicData2; - CK_OBJECT_HANDLE publicKey; -} CK_X9_42_MQV_DERIVE_PARAMS; - -typedef CK_X9_42_MQV_DERIVE_PARAMS CK_PTR CK_X9_42_MQV_DERIVE_PARAMS_PTR; - -/* CK_KEA_DERIVE_PARAMS provides the parameters to the - * CKM_KEA_DERIVE mechanism - */ -typedef struct CK_KEA_DERIVE_PARAMS { - CK_BBOOL isSender; - CK_ULONG ulRandomLen; - CK_BYTE_PTR pRandomA; - CK_BYTE_PTR pRandomB; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pPublicData; -} CK_KEA_DERIVE_PARAMS; - -typedef CK_KEA_DERIVE_PARAMS CK_PTR CK_KEA_DERIVE_PARAMS_PTR; - - -/* CK_RC2_PARAMS provides the parameters to the CKM_RC2_ECB and - * CKM_RC2_MAC mechanisms. 
An instance of CK_RC2_PARAMS just - * holds the effective keysize - */ -typedef CK_ULONG CK_RC2_PARAMS; - -typedef CK_RC2_PARAMS CK_PTR CK_RC2_PARAMS_PTR; - - -/* CK_RC2_CBC_PARAMS provides the parameters to the CKM_RC2_CBC - * mechanism - */ -typedef struct CK_RC2_CBC_PARAMS { - CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ - CK_BYTE iv[8]; /* IV for CBC mode */ -} CK_RC2_CBC_PARAMS; - -typedef CK_RC2_CBC_PARAMS CK_PTR CK_RC2_CBC_PARAMS_PTR; - - -/* CK_RC2_MAC_GENERAL_PARAMS provides the parameters for the - * CKM_RC2_MAC_GENERAL mechanism - */ -typedef struct CK_RC2_MAC_GENERAL_PARAMS { - CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ - CK_ULONG ulMacLength; /* Length of MAC in bytes */ -} CK_RC2_MAC_GENERAL_PARAMS; - -typedef CK_RC2_MAC_GENERAL_PARAMS CK_PTR \ - CK_RC2_MAC_GENERAL_PARAMS_PTR; - - -/* CK_RC5_PARAMS provides the parameters to the CKM_RC5_ECB and - * CKM_RC5_MAC mechanisms - */ -typedef struct CK_RC5_PARAMS { - CK_ULONG ulWordsize; /* wordsize in bits */ - CK_ULONG ulRounds; /* number of rounds */ -} CK_RC5_PARAMS; - -typedef CK_RC5_PARAMS CK_PTR CK_RC5_PARAMS_PTR; - - -/* CK_RC5_CBC_PARAMS provides the parameters to the CKM_RC5_CBC - * mechanism - */ -typedef struct CK_RC5_CBC_PARAMS { - CK_ULONG ulWordsize; /* wordsize in bits */ - CK_ULONG ulRounds; /* number of rounds */ - CK_BYTE_PTR pIv; /* pointer to IV */ - CK_ULONG ulIvLen; /* length of IV in bytes */ -} CK_RC5_CBC_PARAMS; - -typedef CK_RC5_CBC_PARAMS CK_PTR CK_RC5_CBC_PARAMS_PTR; - - -/* CK_RC5_MAC_GENERAL_PARAMS provides the parameters for the - * CKM_RC5_MAC_GENERAL mechanism - */ -typedef struct CK_RC5_MAC_GENERAL_PARAMS { - CK_ULONG ulWordsize; /* wordsize in bits */ - CK_ULONG ulRounds; /* number of rounds */ - CK_ULONG ulMacLength; /* Length of MAC in bytes */ -} CK_RC5_MAC_GENERAL_PARAMS; - -typedef CK_RC5_MAC_GENERAL_PARAMS CK_PTR \ - CK_RC5_MAC_GENERAL_PARAMS_PTR; - -/* CK_MAC_GENERAL_PARAMS provides the parameters to most block - * ciphers' MAC_GENERAL 
mechanisms. Its value is the length of - * the MAC - */ -typedef CK_ULONG CK_MAC_GENERAL_PARAMS; - -typedef CK_MAC_GENERAL_PARAMS CK_PTR CK_MAC_GENERAL_PARAMS_PTR; - -typedef struct CK_DES_CBC_ENCRYPT_DATA_PARAMS { - CK_BYTE iv[8]; - CK_BYTE_PTR pData; - CK_ULONG length; -} CK_DES_CBC_ENCRYPT_DATA_PARAMS; - -typedef CK_DES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR; - -typedef struct CK_AES_CBC_ENCRYPT_DATA_PARAMS { - CK_BYTE iv[16]; - CK_BYTE_PTR pData; - CK_ULONG length; -} CK_AES_CBC_ENCRYPT_DATA_PARAMS; - -typedef CK_AES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR; - -/* CK_SKIPJACK_PRIVATE_WRAP_PARAMS provides the parameters to the - * CKM_SKIPJACK_PRIVATE_WRAP mechanism - */ -typedef struct CK_SKIPJACK_PRIVATE_WRAP_PARAMS { - CK_ULONG ulPasswordLen; - CK_BYTE_PTR pPassword; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pPublicData; - CK_ULONG ulPAndGLen; - CK_ULONG ulQLen; - CK_ULONG ulRandomLen; - CK_BYTE_PTR pRandomA; - CK_BYTE_PTR pPrimeP; - CK_BYTE_PTR pBaseG; - CK_BYTE_PTR pSubprimeQ; -} CK_SKIPJACK_PRIVATE_WRAP_PARAMS; - -typedef CK_SKIPJACK_PRIVATE_WRAP_PARAMS CK_PTR \ - CK_SKIPJACK_PRIVATE_WRAP_PARAMS_PTR; - - -/* CK_SKIPJACK_RELAYX_PARAMS provides the parameters to the - * CKM_SKIPJACK_RELAYX mechanism - */ -typedef struct CK_SKIPJACK_RELAYX_PARAMS { - CK_ULONG ulOldWrappedXLen; - CK_BYTE_PTR pOldWrappedX; - CK_ULONG ulOldPasswordLen; - CK_BYTE_PTR pOldPassword; - CK_ULONG ulOldPublicDataLen; - CK_BYTE_PTR pOldPublicData; - CK_ULONG ulOldRandomLen; - CK_BYTE_PTR pOldRandomA; - CK_ULONG ulNewPasswordLen; - CK_BYTE_PTR pNewPassword; - CK_ULONG ulNewPublicDataLen; - CK_BYTE_PTR pNewPublicData; - CK_ULONG ulNewRandomLen; - CK_BYTE_PTR pNewRandomA; -} CK_SKIPJACK_RELAYX_PARAMS; - -typedef CK_SKIPJACK_RELAYX_PARAMS CK_PTR \ - CK_SKIPJACK_RELAYX_PARAMS_PTR; - - -typedef struct CK_PBE_PARAMS { - CK_BYTE_PTR pInitVector; - CK_UTF8CHAR_PTR pPassword; - CK_ULONG ulPasswordLen; - CK_BYTE_PTR pSalt; - CK_ULONG ulSaltLen; - 
CK_ULONG ulIteration; -} CK_PBE_PARAMS; - -typedef CK_PBE_PARAMS CK_PTR CK_PBE_PARAMS_PTR; - - -/* CK_KEY_WRAP_SET_OAEP_PARAMS provides the parameters to the - * CKM_KEY_WRAP_SET_OAEP mechanism - */ -typedef struct CK_KEY_WRAP_SET_OAEP_PARAMS { - CK_BYTE bBC; /* block contents byte */ - CK_BYTE_PTR pX; /* extra data */ - CK_ULONG ulXLen; /* length of extra data in bytes */ -} CK_KEY_WRAP_SET_OAEP_PARAMS; - -typedef CK_KEY_WRAP_SET_OAEP_PARAMS CK_PTR CK_KEY_WRAP_SET_OAEP_PARAMS_PTR; - -typedef struct CK_SSL3_RANDOM_DATA { - CK_BYTE_PTR pClientRandom; - CK_ULONG ulClientRandomLen; - CK_BYTE_PTR pServerRandom; - CK_ULONG ulServerRandomLen; -} CK_SSL3_RANDOM_DATA; - - -typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS { - CK_SSL3_RANDOM_DATA RandomInfo; - CK_VERSION_PTR pVersion; -} CK_SSL3_MASTER_KEY_DERIVE_PARAMS; - -typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS CK_PTR \ - CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR; - -typedef struct CK_SSL3_KEY_MAT_OUT { - CK_OBJECT_HANDLE hClientMacSecret; - CK_OBJECT_HANDLE hServerMacSecret; - CK_OBJECT_HANDLE hClientKey; - CK_OBJECT_HANDLE hServerKey; - CK_BYTE_PTR pIVClient; - CK_BYTE_PTR pIVServer; -} CK_SSL3_KEY_MAT_OUT; - -typedef CK_SSL3_KEY_MAT_OUT CK_PTR CK_SSL3_KEY_MAT_OUT_PTR; - - -typedef struct CK_SSL3_KEY_MAT_PARAMS { - CK_ULONG ulMacSizeInBits; - CK_ULONG ulKeySizeInBits; - CK_ULONG ulIVSizeInBits; - CK_BBOOL bIsExport; - CK_SSL3_RANDOM_DATA RandomInfo; - CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; -} CK_SSL3_KEY_MAT_PARAMS; - -typedef CK_SSL3_KEY_MAT_PARAMS CK_PTR CK_SSL3_KEY_MAT_PARAMS_PTR; - -typedef struct CK_TLS_PRF_PARAMS { - CK_BYTE_PTR pSeed; - CK_ULONG ulSeedLen; - CK_BYTE_PTR pLabel; - CK_ULONG ulLabelLen; - CK_BYTE_PTR pOutput; - CK_ULONG_PTR pulOutputLen; -} CK_TLS_PRF_PARAMS; - -typedef CK_TLS_PRF_PARAMS CK_PTR CK_TLS_PRF_PARAMS_PTR; - -typedef struct CK_WTLS_RANDOM_DATA { - CK_BYTE_PTR pClientRandom; - CK_ULONG ulClientRandomLen; - CK_BYTE_PTR pServerRandom; - CK_ULONG ulServerRandomLen; -} 
CK_WTLS_RANDOM_DATA; - -typedef CK_WTLS_RANDOM_DATA CK_PTR CK_WTLS_RANDOM_DATA_PTR; - -typedef struct CK_WTLS_MASTER_KEY_DERIVE_PARAMS { - CK_MECHANISM_TYPE DigestMechanism; - CK_WTLS_RANDOM_DATA RandomInfo; - CK_BYTE_PTR pVersion; -} CK_WTLS_MASTER_KEY_DERIVE_PARAMS; - -typedef CK_WTLS_MASTER_KEY_DERIVE_PARAMS CK_PTR \ - CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR; - -typedef struct CK_WTLS_PRF_PARAMS { - CK_MECHANISM_TYPE DigestMechanism; - CK_BYTE_PTR pSeed; - CK_ULONG ulSeedLen; - CK_BYTE_PTR pLabel; - CK_ULONG ulLabelLen; - CK_BYTE_PTR pOutput; - CK_ULONG_PTR pulOutputLen; -} CK_WTLS_PRF_PARAMS; - -typedef CK_WTLS_PRF_PARAMS CK_PTR CK_WTLS_PRF_PARAMS_PTR; - -typedef struct CK_WTLS_KEY_MAT_OUT { - CK_OBJECT_HANDLE hMacSecret; - CK_OBJECT_HANDLE hKey; - CK_BYTE_PTR pIV; -} CK_WTLS_KEY_MAT_OUT; - -typedef CK_WTLS_KEY_MAT_OUT CK_PTR CK_WTLS_KEY_MAT_OUT_PTR; - -typedef struct CK_WTLS_KEY_MAT_PARAMS { - CK_MECHANISM_TYPE DigestMechanism; - CK_ULONG ulMacSizeInBits; - CK_ULONG ulKeySizeInBits; - CK_ULONG ulIVSizeInBits; - CK_ULONG ulSequenceNumber; - CK_BBOOL bIsExport; - CK_WTLS_RANDOM_DATA RandomInfo; - CK_WTLS_KEY_MAT_OUT_PTR pReturnedKeyMaterial; -} CK_WTLS_KEY_MAT_PARAMS; - -typedef CK_WTLS_KEY_MAT_PARAMS CK_PTR CK_WTLS_KEY_MAT_PARAMS_PTR; - -typedef struct CK_CMS_SIG_PARAMS { - CK_OBJECT_HANDLE certificateHandle; - CK_MECHANISM_PTR pSigningMechanism; - CK_MECHANISM_PTR pDigestMechanism; - CK_UTF8CHAR_PTR pContentType; - CK_BYTE_PTR pRequestedAttributes; - CK_ULONG ulRequestedAttributesLen; - CK_BYTE_PTR pRequiredAttributes; - CK_ULONG ulRequiredAttributesLen; -} CK_CMS_SIG_PARAMS; - -typedef CK_CMS_SIG_PARAMS CK_PTR CK_CMS_SIG_PARAMS_PTR; - -typedef struct CK_KEY_DERIVATION_STRING_DATA { - CK_BYTE_PTR pData; - CK_ULONG ulLen; -} CK_KEY_DERIVATION_STRING_DATA; - -typedef CK_KEY_DERIVATION_STRING_DATA CK_PTR \ - CK_KEY_DERIVATION_STRING_DATA_PTR; - - -/* The CK_EXTRACT_PARAMS is used for the - * CKM_EXTRACT_KEY_FROM_KEY mechanism. 
It specifies which bit - * of the base key should be used as the first bit of the - * derived key - */ -typedef CK_ULONG CK_EXTRACT_PARAMS; - -typedef CK_EXTRACT_PARAMS CK_PTR CK_EXTRACT_PARAMS_PTR; - -/* CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE is used to - * indicate the Pseudo-Random Function (PRF) used to generate - * key bits using PKCS #5 PBKDF2. - */ -typedef CK_ULONG CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE; - -typedef CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE CK_PTR \ - CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR; - -#define CKP_PKCS5_PBKD2_HMAC_SHA1 0x00000001UL -#define CKP_PKCS5_PBKD2_HMAC_GOSTR3411 0x00000002UL -#define CKP_PKCS5_PBKD2_HMAC_SHA224 0x00000003UL -#define CKP_PKCS5_PBKD2_HMAC_SHA256 0x00000004UL -#define CKP_PKCS5_PBKD2_HMAC_SHA384 0x00000005UL -#define CKP_PKCS5_PBKD2_HMAC_SHA512 0x00000006UL -#define CKP_PKCS5_PBKD2_HMAC_SHA512_224 0x00000007UL -#define CKP_PKCS5_PBKD2_HMAC_SHA512_256 0x00000008UL - -/* CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE is used to indicate the - * source of the salt value when deriving a key using PKCS #5 - * PBKDF2. - */ -typedef CK_ULONG CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE; - -typedef CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE CK_PTR \ - CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR; - -/* The following salt value sources are defined in PKCS #5 v2.0. */ -#define CKZ_SALT_SPECIFIED 0x00000001UL - -/* CK_PKCS5_PBKD2_PARAMS is a structure that provides the - * parameters to the CKM_PKCS5_PBKD2 mechanism. 
- */ -typedef struct CK_PKCS5_PBKD2_PARAMS { - CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; - CK_VOID_PTR pSaltSourceData; - CK_ULONG ulSaltSourceDataLen; - CK_ULONG iterations; - CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; - CK_VOID_PTR pPrfData; - CK_ULONG ulPrfDataLen; - CK_UTF8CHAR_PTR pPassword; - CK_ULONG_PTR ulPasswordLen; -} CK_PKCS5_PBKD2_PARAMS; - -typedef CK_PKCS5_PBKD2_PARAMS CK_PTR CK_PKCS5_PBKD2_PARAMS_PTR; - -/* CK_PKCS5_PBKD2_PARAMS2 is a corrected version of the CK_PKCS5_PBKD2_PARAMS - * structure that provides the parameters to the CKM_PKCS5_PBKD2 mechanism - * noting that the ulPasswordLen field is a CK_ULONG and not a CK_ULONG_PTR. - */ -typedef struct CK_PKCS5_PBKD2_PARAMS2 { - CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; - CK_VOID_PTR pSaltSourceData; - CK_ULONG ulSaltSourceDataLen; - CK_ULONG iterations; - CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; - CK_VOID_PTR pPrfData; - CK_ULONG ulPrfDataLen; - CK_UTF8CHAR_PTR pPassword; - CK_ULONG ulPasswordLen; -} CK_PKCS5_PBKD2_PARAMS2; - -typedef CK_PKCS5_PBKD2_PARAMS2 CK_PTR CK_PKCS5_PBKD2_PARAMS2_PTR; - -typedef CK_ULONG CK_OTP_PARAM_TYPE; -typedef CK_OTP_PARAM_TYPE CK_PARAM_TYPE; /* backward compatibility */ - -typedef struct CK_OTP_PARAM { - CK_OTP_PARAM_TYPE type; - CK_VOID_PTR pValue; - CK_ULONG ulValueLen; -} CK_OTP_PARAM; - -typedef CK_OTP_PARAM CK_PTR CK_OTP_PARAM_PTR; - -typedef struct CK_OTP_PARAMS { - CK_OTP_PARAM_PTR pParams; - CK_ULONG ulCount; -} CK_OTP_PARAMS; - -typedef CK_OTP_PARAMS CK_PTR CK_OTP_PARAMS_PTR; - -typedef struct CK_OTP_SIGNATURE_INFO { - CK_OTP_PARAM_PTR pParams; - CK_ULONG ulCount; -} CK_OTP_SIGNATURE_INFO; - -typedef CK_OTP_SIGNATURE_INFO CK_PTR CK_OTP_SIGNATURE_INFO_PTR; - -#define CK_OTP_VALUE 0UL -#define CK_OTP_PIN 1UL -#define CK_OTP_CHALLENGE 2UL -#define CK_OTP_TIME 3UL -#define CK_OTP_COUNTER 4UL -#define CK_OTP_FLAGS 5UL -#define CK_OTP_OUTPUT_LENGTH 6UL -#define CK_OTP_OUTPUT_FORMAT 7UL - -#define CKF_NEXT_OTP 0x00000001UL -#define CKF_EXCLUDE_TIME 
0x00000002UL -#define CKF_EXCLUDE_COUNTER 0x00000004UL -#define CKF_EXCLUDE_CHALLENGE 0x00000008UL -#define CKF_EXCLUDE_PIN 0x00000010UL -#define CKF_USER_FRIENDLY_OTP 0x00000020UL - -typedef struct CK_KIP_PARAMS { - CK_MECHANISM_PTR pMechanism; - CK_OBJECT_HANDLE hKey; - CK_BYTE_PTR pSeed; - CK_ULONG ulSeedLen; -} CK_KIP_PARAMS; - -typedef CK_KIP_PARAMS CK_PTR CK_KIP_PARAMS_PTR; - -typedef struct CK_AES_CTR_PARAMS { - CK_ULONG ulCounterBits; - CK_BYTE cb[16]; -} CK_AES_CTR_PARAMS; - -typedef CK_AES_CTR_PARAMS CK_PTR CK_AES_CTR_PARAMS_PTR; - -typedef struct CK_GCM_PARAMS { - CK_BYTE_PTR pIv; - CK_ULONG ulIvLen; - CK_ULONG ulIvBits; - CK_BYTE_PTR pAAD; - CK_ULONG ulAADLen; - CK_ULONG ulTagBits; -} CK_GCM_PARAMS; - -typedef CK_GCM_PARAMS CK_PTR CK_GCM_PARAMS_PTR; - -typedef struct CK_CCM_PARAMS { - CK_ULONG ulDataLen; - CK_BYTE_PTR pNonce; - CK_ULONG ulNonceLen; - CK_BYTE_PTR pAAD; - CK_ULONG ulAADLen; - CK_ULONG ulMACLen; -} CK_CCM_PARAMS; - -typedef CK_CCM_PARAMS CK_PTR CK_CCM_PARAMS_PTR; - -/* Deprecated. Use CK_GCM_PARAMS */ -typedef struct CK_AES_GCM_PARAMS { - CK_BYTE_PTR pIv; - CK_ULONG ulIvLen; - CK_ULONG ulIvBits; - CK_BYTE_PTR pAAD; - CK_ULONG ulAADLen; - CK_ULONG ulTagBits; -} CK_AES_GCM_PARAMS; - -typedef CK_AES_GCM_PARAMS CK_PTR CK_AES_GCM_PARAMS_PTR; - -/* Deprecated. 
Use CK_CCM_PARAMS */ -typedef struct CK_AES_CCM_PARAMS { - CK_ULONG ulDataLen; - CK_BYTE_PTR pNonce; - CK_ULONG ulNonceLen; - CK_BYTE_PTR pAAD; - CK_ULONG ulAADLen; - CK_ULONG ulMACLen; -} CK_AES_CCM_PARAMS; - -typedef CK_AES_CCM_PARAMS CK_PTR CK_AES_CCM_PARAMS_PTR; - -typedef struct CK_CAMELLIA_CTR_PARAMS { - CK_ULONG ulCounterBits; - CK_BYTE cb[16]; -} CK_CAMELLIA_CTR_PARAMS; - -typedef CK_CAMELLIA_CTR_PARAMS CK_PTR CK_CAMELLIA_CTR_PARAMS_PTR; - -typedef struct CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS { - CK_BYTE iv[16]; - CK_BYTE_PTR pData; - CK_ULONG length; -} CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS; - -typedef CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ - CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS_PTR; - -typedef struct CK_ARIA_CBC_ENCRYPT_DATA_PARAMS { - CK_BYTE iv[16]; - CK_BYTE_PTR pData; - CK_ULONG length; -} CK_ARIA_CBC_ENCRYPT_DATA_PARAMS; - -typedef CK_ARIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ - CK_ARIA_CBC_ENCRYPT_DATA_PARAMS_PTR; - -typedef struct CK_DSA_PARAMETER_GEN_PARAM { - CK_MECHANISM_TYPE hash; - CK_BYTE_PTR pSeed; - CK_ULONG ulSeedLen; - CK_ULONG ulIndex; -} CK_DSA_PARAMETER_GEN_PARAM; - -typedef CK_DSA_PARAMETER_GEN_PARAM CK_PTR CK_DSA_PARAMETER_GEN_PARAM_PTR; - -typedef struct CK_ECDH_AES_KEY_WRAP_PARAMS { - CK_ULONG ulAESKeyBits; - CK_EC_KDF_TYPE kdf; - CK_ULONG ulSharedDataLen; - CK_BYTE_PTR pSharedData; -} CK_ECDH_AES_KEY_WRAP_PARAMS; - -typedef CK_ECDH_AES_KEY_WRAP_PARAMS CK_PTR CK_ECDH_AES_KEY_WRAP_PARAMS_PTR; - -typedef CK_ULONG CK_JAVA_MIDP_SECURITY_DOMAIN; - -typedef CK_ULONG CK_CERTIFICATE_CATEGORY; - -typedef struct CK_RSA_AES_KEY_WRAP_PARAMS { - CK_ULONG ulAESKeyBits; - CK_RSA_PKCS_OAEP_PARAMS_PTR pOAEPParams; -} CK_RSA_AES_KEY_WRAP_PARAMS; - -typedef CK_RSA_AES_KEY_WRAP_PARAMS CK_PTR CK_RSA_AES_KEY_WRAP_PARAMS_PTR; - -typedef struct CK_TLS12_MASTER_KEY_DERIVE_PARAMS { - CK_SSL3_RANDOM_DATA RandomInfo; - CK_VERSION_PTR pVersion; - CK_MECHANISM_TYPE prfHashMechanism; -} CK_TLS12_MASTER_KEY_DERIVE_PARAMS; - -typedef CK_TLS12_MASTER_KEY_DERIVE_PARAMS 
CK_PTR \ - CK_TLS12_MASTER_KEY_DERIVE_PARAMS_PTR; - -typedef struct CK_TLS12_KEY_MAT_PARAMS { - CK_ULONG ulMacSizeInBits; - CK_ULONG ulKeySizeInBits; - CK_ULONG ulIVSizeInBits; - CK_BBOOL bIsExport; - CK_SSL3_RANDOM_DATA RandomInfo; - CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; - CK_MECHANISM_TYPE prfHashMechanism; -} CK_TLS12_KEY_MAT_PARAMS; - -typedef CK_TLS12_KEY_MAT_PARAMS CK_PTR CK_TLS12_KEY_MAT_PARAMS_PTR; - -typedef struct CK_TLS_KDF_PARAMS { - CK_MECHANISM_TYPE prfMechanism; - CK_BYTE_PTR pLabel; - CK_ULONG ulLabelLength; - CK_SSL3_RANDOM_DATA RandomInfo; - CK_BYTE_PTR pContextData; - CK_ULONG ulContextDataLength; -} CK_TLS_KDF_PARAMS; - -typedef CK_TLS_KDF_PARAMS CK_PTR CK_TLS_KDF_PARAMS_PTR; - -typedef struct CK_TLS_MAC_PARAMS { - CK_MECHANISM_TYPE prfHashMechanism; - CK_ULONG ulMacLength; - CK_ULONG ulServerOrClient; -} CK_TLS_MAC_PARAMS; - -typedef CK_TLS_MAC_PARAMS CK_PTR CK_TLS_MAC_PARAMS_PTR; - -typedef struct CK_GOSTR3410_DERIVE_PARAMS { - CK_EC_KDF_TYPE kdf; - CK_BYTE_PTR pPublicData; - CK_ULONG ulPublicDataLen; - CK_BYTE_PTR pUKM; - CK_ULONG ulUKMLen; -} CK_GOSTR3410_DERIVE_PARAMS; - -typedef CK_GOSTR3410_DERIVE_PARAMS CK_PTR CK_GOSTR3410_DERIVE_PARAMS_PTR; - -typedef struct CK_GOSTR3410_KEY_WRAP_PARAMS { - CK_BYTE_PTR pWrapOID; - CK_ULONG ulWrapOIDLen; - CK_BYTE_PTR pUKM; - CK_ULONG ulUKMLen; - CK_OBJECT_HANDLE hKey; -} CK_GOSTR3410_KEY_WRAP_PARAMS; - -typedef CK_GOSTR3410_KEY_WRAP_PARAMS CK_PTR CK_GOSTR3410_KEY_WRAP_PARAMS_PTR; - -typedef struct CK_SEED_CBC_ENCRYPT_DATA_PARAMS { - CK_BYTE iv[16]; - CK_BYTE_PTR pData; - CK_ULONG length; -} CK_SEED_CBC_ENCRYPT_DATA_PARAMS; - -typedef CK_SEED_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ - CK_SEED_CBC_ENCRYPT_DATA_PARAMS_PTR; - -#endif /* _PKCS11T_H_ */ - diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11_lib.c b/contrib/restricted/aws/aws-c-io/source/pkcs11_lib.c index 8047d118c79..41c74b7413b 100644 --- a/contrib/restricted/aws/aws-c-io/source/pkcs11_lib.c +++ 
b/contrib/restricted/aws/aws-c-io/source/pkcs11_lib.c @@ -1332,7 +1332,7 @@ int aws_pkcs11_lib_sign( digest_alg, signature_alg, out_signature); - case CKK_ECDSA: + case CKK_EC: return s_pkcs11_sign_ecdsa( pkcs11_lib, session_handle, diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11_private.h b/contrib/restricted/aws/aws-c-io/source/pkcs11_private.h index 314c739199d..370beba7d7c 100644 --- a/contrib/restricted/aws/aws-c-io/source/pkcs11_private.h +++ b/contrib/restricted/aws/aws-c-io/source/pkcs11_private.h @@ -7,13 +7,6 @@ */ #include <aws/io/tls_channel_handler.h> -/* These defines must exist before the official PKCS#11 headers are included */ -#define CK_PTR * -#define NULL_PTR 0 -#define CK_DEFINE_FUNCTION(returnType, name) returnType name -#define CK_DECLARE_FUNCTION(returnType, name) returnType name -#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType(CK_PTR name) -#define CK_CALLBACK_FUNCTION(returnType, name) returnType(CK_PTR name) #include "pkcs11/v2.40/pkcs11.h" /** diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11_tls_op_handler.c b/contrib/restricted/aws/aws-c-io/source/pkcs11_tls_op_handler.c index 6d155cbd9e5..0951d5c8787 100644 --- a/contrib/restricted/aws/aws-c-io/source/pkcs11_tls_op_handler.c +++ b/contrib/restricted/aws/aws-c-io/source/pkcs11_tls_op_handler.c @@ -183,10 +183,8 @@ struct aws_custom_key_op_handler *aws_pkcs11_tls_op_handler_new( goto done; } - if (pkcs_user_pin != NULL) { - if (aws_pkcs11_lib_login_user(pkcs11_handler->lib, pkcs11_handler->session_handle, pkcs_user_pin)) { - goto done; - } + if (aws_pkcs11_lib_login_user(pkcs11_handler->lib, pkcs11_handler->session_handle, pkcs_user_pin)) { + goto done; } if (aws_pkcs11_lib_find_private_key( diff --git a/contrib/restricted/aws/aws-c-io/source/pki_utils.c b/contrib/restricted/aws/aws-c-io/source/pki_utils.c deleted file mode 100644 index 2be52e80ceb..00000000000 --- a/contrib/restricted/aws/aws-c-io/source/pki_utils.c +++ /dev/null @@ -1,224 +0,0 @@ 
-/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ -#include <aws/io/private/pki_utils.h> - -#include <aws/common/encoding.h> - -#include <aws/io/file_utils.h> -#include <aws/io/logging.h> - -#include <ctype.h> -#include <errno.h> -#include <string.h> - -enum PEM_PARSE_STATE { - BEGIN, - ON_DATA, -}; - -void aws_cert_chain_clean_up(struct aws_array_list *cert_chain) { - for (size_t i = 0; i < aws_array_list_length(cert_chain); ++i) { - struct aws_byte_buf *decoded_buffer_ptr = NULL; - aws_array_list_get_at_ptr(cert_chain, (void **)&decoded_buffer_ptr, i); - - if (decoded_buffer_ptr) { - aws_secure_zero(decoded_buffer_ptr->buffer, decoded_buffer_ptr->len); - aws_byte_buf_clean_up(decoded_buffer_ptr); - } - } - - /* remember, we don't own it so we don't free it, just undo whatever mutations we've done at this point. */ - aws_array_list_clear(cert_chain); -} - -static int s_convert_pem_to_raw_base64( - struct aws_allocator *allocator, - const struct aws_byte_cursor *pem, - struct aws_array_list *cert_chain_or_key) { - enum PEM_PARSE_STATE state = BEGIN; - - struct aws_byte_buf current_cert; - const char *begin_header = "-----BEGIN"; - const char *end_header = "-----END"; - size_t begin_header_len = strlen(begin_header); - size_t end_header_len = strlen(end_header); - bool on_length_calc = true; - - struct aws_array_list split_buffers; - if (aws_array_list_init_dynamic(&split_buffers, allocator, 16, sizeof(struct aws_byte_cursor))) { - return AWS_OP_ERR; - } - - if (aws_byte_cursor_split_on_char(pem, '\n', &split_buffers)) { - aws_array_list_clean_up(&split_buffers); - AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: Invalid PEM buffer: failed to split on newline"); - return AWS_OP_ERR; - } - - size_t split_count = aws_array_list_length(&split_buffers); - size_t i = 0; - size_t index_of_current_cert_start = 0; - size_t current_cert_len = 0; - - while (i < split_count) { - struct aws_byte_cursor 
*current_cur_ptr = NULL; - aws_array_list_get_at_ptr(&split_buffers, (void **)¤t_cur_ptr, i); - - /* burn off the padding in the buffer first. - * Worst case we'll only have to do this once per line in the buffer. */ - while (current_cur_ptr->len && aws_isspace(*current_cur_ptr->ptr)) { - aws_byte_cursor_advance(current_cur_ptr, 1); - } - - /* handle CRLF on Windows by burning '\r' off the end of the buffer */ - if (current_cur_ptr->len && (current_cur_ptr->ptr[current_cur_ptr->len - 1] == '\r')) { - current_cur_ptr->len--; - } - - switch (state) { - case BEGIN: - if (current_cur_ptr->len > begin_header_len && - !strncmp((const char *)current_cur_ptr->ptr, begin_header, begin_header_len)) { - state = ON_DATA; - index_of_current_cert_start = i + 1; - } - ++i; - break; - /* this loops through the lines containing data twice. First to figure out the length, a second - * time to actually copy the data. */ - case ON_DATA: - /* Found end tag. */ - if (current_cur_ptr->len > end_header_len && - !strncmp((const char *)current_cur_ptr->ptr, end_header, end_header_len)) { - if (on_length_calc) { - on_length_calc = false; - state = ON_DATA; - i = index_of_current_cert_start; - - if (aws_byte_buf_init(¤t_cert, allocator, current_cert_len)) { - goto end_of_loop; - } - - } else { - if (aws_array_list_push_back(cert_chain_or_key, ¤t_cert)) { - aws_secure_zero(¤t_cert.buffer, current_cert.len); - aws_byte_buf_clean_up(¤t_cert); - goto end_of_loop; - } - state = BEGIN; - on_length_calc = true; - current_cert_len = 0; - ++i; - } - /* actually on a line with data in it. 
*/ - } else { - if (!on_length_calc) { - aws_byte_buf_write(¤t_cert, current_cur_ptr->ptr, current_cur_ptr->len); - } else { - current_cert_len += current_cur_ptr->len; - } - ++i; - } - break; - } - } - -end_of_loop: - aws_array_list_clean_up(&split_buffers); - - if (state == BEGIN && aws_array_list_length(cert_chain_or_key) > 0) { - return AWS_OP_SUCCESS; - } - - AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: Invalid PEM buffer."); - aws_cert_chain_clean_up(cert_chain_or_key); - return aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); -} - -int aws_decode_pem_to_buffer_list( - struct aws_allocator *alloc, - const struct aws_byte_cursor *pem_cursor, - struct aws_array_list *cert_chain_or_key) { - AWS_ASSERT(aws_array_list_length(cert_chain_or_key) == 0); - struct aws_array_list base_64_buffer_list; - - if (aws_array_list_init_dynamic(&base_64_buffer_list, alloc, 2, sizeof(struct aws_byte_buf))) { - return AWS_OP_ERR; - } - - int err_code = AWS_OP_ERR; - - if (s_convert_pem_to_raw_base64(alloc, pem_cursor, &base_64_buffer_list)) { - goto cleanup_base64_buffer_list; - } - - for (size_t i = 0; i < aws_array_list_length(&base_64_buffer_list); ++i) { - size_t decoded_len = 0; - struct aws_byte_buf *byte_buf_ptr = NULL; - aws_array_list_get_at_ptr(&base_64_buffer_list, (void **)&byte_buf_ptr, i); - struct aws_byte_cursor byte_cur = aws_byte_cursor_from_buf(byte_buf_ptr); - - if (aws_base64_compute_decoded_len(&byte_cur, &decoded_len)) { - aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); - goto cleanup_all; - } - - struct aws_byte_buf decoded_buffer; - if (aws_byte_buf_init(&decoded_buffer, alloc, decoded_len)) { - goto cleanup_all; - } - - if (aws_base64_decode(&byte_cur, &decoded_buffer)) { - aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); - aws_byte_buf_clean_up_secure(&decoded_buffer); - goto cleanup_all; - } - - if (aws_array_list_push_back(cert_chain_or_key, &decoded_buffer)) { - aws_byte_buf_clean_up_secure(&decoded_buffer); - goto cleanup_all; - } - } - - err_code = 
AWS_OP_SUCCESS; - -cleanup_all: - if (err_code != AWS_OP_SUCCESS) { - AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: Invalid PEM buffer."); - aws_cert_chain_clean_up(cert_chain_or_key); - } - -cleanup_base64_buffer_list: - aws_cert_chain_clean_up(&base_64_buffer_list); - aws_array_list_clean_up(&base_64_buffer_list); - - return err_code; -} - -int aws_read_and_decode_pem_file_to_buffer_list( - struct aws_allocator *alloc, - const char *filename, - struct aws_array_list *cert_chain_or_key) { - - struct aws_byte_buf raw_file_buffer; - if (aws_byte_buf_init_from_file(&raw_file_buffer, alloc, filename)) { - AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: Failed to read file %s.", filename); - return AWS_OP_ERR; - } - AWS_ASSERT(raw_file_buffer.buffer); - - struct aws_byte_cursor file_cursor = aws_byte_cursor_from_buf(&raw_file_buffer); - if (aws_decode_pem_to_buffer_list(alloc, &file_cursor, cert_chain_or_key)) { - aws_secure_zero(raw_file_buffer.buffer, raw_file_buffer.len); - aws_byte_buf_clean_up(&raw_file_buffer); - AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: Failed to decode PEM file %s.", filename); - return AWS_OP_ERR; - } - - aws_secure_zero(raw_file_buffer.buffer, raw_file_buffer.len); - aws_byte_buf_clean_up(&raw_file_buffer); - - return AWS_OP_SUCCESS; -} diff --git a/contrib/restricted/aws/aws-c-io/source/posix/socket.c b/contrib/restricted/aws/aws-c-io/source/posix/socket.c index 7dc170ccea7..0dac9442c14 100644 --- a/contrib/restricted/aws/aws-c-io/source/posix/socket.c +++ b/contrib/restricted/aws/aws-c-io/source/posix/socket.c @@ -9,6 +9,7 @@ #include <aws/common/condition_variable.h> #include <aws/common/mutex.h> #include <aws/common/string.h> +#include <aws/common/uuid.h> #include <aws/io/event_loop.h> #include <aws/io/logging.h> @@ -339,18 +340,7 @@ static int s_update_local_endpoint(struct aws_socket *socket) { } else if (address.ss_family == AF_VSOCK) { struct sockaddr_vm *s = (struct sockaddr_vm *)&address; - /* VSOCK port is 32bit, but aws_socket_endpoint.port is only 
16bit. - * Hopefully this isn't an issue, since users can only pass in 16bit values. - * But if it becomes an issue, we'll need to make aws_socket_endpoint more flexible */ - if (s->svm_port > UINT16_MAX) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: aws_socket_endpoint can't deal with VSOCK port > UINT16_MAX", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); - } - tmp_endpoint.port = (uint16_t)s->svm_port; + tmp_endpoint.port = s->svm_port; snprintf(tmp_endpoint.address, sizeof(tmp_endpoint.address), "%" PRIu32, s->svm_cid); return AWS_OP_SUCCESS; @@ -383,7 +373,7 @@ static int s_on_connection_success(struct aws_socket *socket) { if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( + AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: failed to determine connection error %d", (void *)socket, @@ -396,7 +386,7 @@ static int s_on_connection_success(struct aws_socket *socket) { } if (connect_result) { - AWS_LOGF_ERROR( + AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: connection error %d", (void *)socket, @@ -436,7 +426,7 @@ static int s_on_connection_success(struct aws_socket *socket) { static void s_on_connection_error(struct aws_socket *socket, int error) { socket->state = ERROR; - AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection failure", (void *)socket, socket->io_handle.data.fd); + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection failure", (void *)socket, socket->io_handle.data.fd); if (socket->connection_result_fn) { socket->connection_result_fn(socket, error, socket->connect_accept_user_data); } else if (socket->accept_result_fn) { @@ -641,18 +631,22 @@ int aws_socket_connect( return AWS_OP_ERR; } + if (aws_socket_validate_port_for_connect(remote_endpoint->port, socket->options.domain)) { + return AWS_OP_ERR; + } + struct socket_address 
address; AWS_ZERO_STRUCT(address); socklen_t sock_size = 0; int pton_err = 1; if (socket->options.domain == AWS_SOCKET_IPV4) { pton_err = inet_pton(AF_INET, remote_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); - address.sock_addr_types.addr_in.sin_port = htons(remote_endpoint->port); + address.sock_addr_types.addr_in.sin_port = htons((uint16_t)remote_endpoint->port); address.sock_addr_types.addr_in.sin_family = AF_INET; sock_size = sizeof(address.sock_addr_types.addr_in); } else if (socket->options.domain == AWS_SOCKET_IPV6) { pton_err = inet_pton(AF_INET6, remote_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); - address.sock_addr_types.addr_in6.sin6_port = htons(remote_endpoint->port); + address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)remote_endpoint->port); address.sock_addr_types.addr_in6.sin6_family = AF_INET6; sock_size = sizeof(address.sock_addr_types.addr_in6); } else if (socket->options.domain == AWS_SOCKET_LOCAL) { @@ -663,7 +657,7 @@ int aws_socket_connect( } else if (socket->options.domain == AWS_SOCKET_VSOCK) { pton_err = parse_cid(remote_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; - address.sock_addr_types.vm_addr.svm_port = (unsigned int)remote_endpoint->port; + address.sock_addr_types.vm_addr.svm_port = remote_endpoint->port; sock_size = sizeof(address.sock_addr_types.vm_addr); #endif } else { @@ -673,23 +667,23 @@ int aws_socket_connect( if (pton_err != 1) { int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( + AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to parse address %s:%d.", + "id=%p fd=%d: failed to parse address %s:%u.", (void *)socket, socket->io_handle.data.fd, remote_endpoint->address, - (int)remote_endpoint->port); + remote_endpoint->port); return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, - "id=%p fd=%d: 
connecting to endpoint %s:%d.", + "id=%p fd=%d: connecting to endpoint %s:%u.", (void *)socket, socket->io_handle.data.fd, remote_endpoint->address, - (int)remote_endpoint->port); + remote_endpoint->port); socket->state = CONNECTING; socket->remote_endpoint = *remote_endpoint; @@ -769,7 +763,7 @@ int aws_socket_connect( (unsigned long long)timeout); aws_event_loop_schedule_task_future(event_loop, timeout_task, timeout); } else { - AWS_LOGF_ERROR( + AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: connect failed with error code %d.", (void *)socket, @@ -805,13 +799,17 @@ int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint return AWS_OP_ERR; } + if (aws_socket_validate_port_for_bind(local_endpoint->port, socket->options.domain)) { + return AWS_OP_ERR; + } + AWS_LOGF_INFO( AWS_LS_IO_SOCKET, - "id=%p fd=%d: binding to %s:%d.", + "id=%p fd=%d: binding to %s:%u.", (void *)socket, socket->io_handle.data.fd, local_endpoint->address, - (int)local_endpoint->port); + local_endpoint->port); struct socket_address address; AWS_ZERO_STRUCT(address); @@ -819,12 +817,12 @@ int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint int pton_err = 1; if (socket->options.domain == AWS_SOCKET_IPV4) { pton_err = inet_pton(AF_INET, local_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); - address.sock_addr_types.addr_in.sin_port = htons(local_endpoint->port); + address.sock_addr_types.addr_in.sin_port = htons((uint16_t)local_endpoint->port); address.sock_addr_types.addr_in.sin_family = AF_INET; sock_size = sizeof(address.sock_addr_types.addr_in); } else if (socket->options.domain == AWS_SOCKET_IPV6) { pton_err = inet_pton(AF_INET6, local_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); - address.sock_addr_types.addr_in6.sin6_port = htons(local_endpoint->port); + address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)local_endpoint->port); address.sock_addr_types.addr_in6.sin6_family = AF_INET6; sock_size = 
sizeof(address.sock_addr_types.addr_in6); } else if (socket->options.domain == AWS_SOCKET_LOCAL) { @@ -835,7 +833,7 @@ int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint } else if (socket->options.domain == AWS_SOCKET_VSOCK) { pton_err = parse_cid(local_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; - address.sock_addr_types.vm_addr.svm_port = (unsigned int)local_endpoint->port; + address.sock_addr_types.vm_addr.svm_port = local_endpoint->port; sock_size = sizeof(address.sock_addr_types.vm_addr); #endif } else { @@ -847,11 +845,11 @@ int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to parse address %s:%d.", + "id=%p fd=%d: failed to parse address %s:%u.", (void *)socket, socket->io_handle.data.fd, local_endpoint->address, - (int)local_endpoint->port); + local_endpoint->port); return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); } @@ -881,7 +879,7 @@ int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, - "id=%p fd=%d: successfully bound to %s:%d", + "id=%p fd=%d: successfully bound to %s:%u", (void *)socket, socket->io_handle.data.fd, socket->local_endpoint.address, @@ -995,7 +993,7 @@ static void s_socket_accept_event( new_sock->local_endpoint = socket->local_endpoint; new_sock->state = CONNECTED_READ | CONNECTED_WRITE; - uint16_t port = 0; + uint32_t port = 0; /* get the info on the incoming socket's address */ if (in_addr.ss_family == AF_INET) { @@ -1894,7 +1892,7 @@ int aws_socket_get_error(struct aws_socket *socket) { socklen_t result_length = sizeof(connect_result); if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { - return AWS_OP_ERR; + return 
s_determine_socket_error(errno); } if (connect_result) { @@ -1907,3 +1905,12 @@ int aws_socket_get_error(struct aws_socket *socket) { bool aws_socket_is_open(struct aws_socket *socket) { return socket->io_handle.data.fd >= 0; } + +void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { + struct aws_uuid uuid; + AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); + char uuid_str[AWS_UUID_STR_LEN] = {0}; + struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); + AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); + snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); +} diff --git a/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c b/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c index 4a7d8bf4507..80770b62be5 100644 --- a/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c +++ b/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c @@ -523,7 +523,7 @@ static int s_s2n_handler_process_read_message( AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Downstream window %llu", (void *)handler, (unsigned long long)downstream_window); - while (processed < downstream_window && blocked == S2N_NOT_BLOCKED) { + while (processed < downstream_window) { struct aws_io_message *outgoing_read_message = aws_channel_acquire_message_from_pool( slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, downstream_window - processed); @@ -558,9 +558,24 @@ static int s_s2n_handler_process_read_message( if (read < 0) { aws_mem_release(outgoing_read_message->allocator, outgoing_read_message); - continue; + + /* the socket blocked so exit from the loop */ + if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_BLOCKED) { + break; + } + + /* the socket returned a fatal error so shut down */ + AWS_LOGF_ERROR( + AWS_LS_IO_TLS, + "id=%p: S2N failed to read with error: %s (%s)", + (void *)handler, + 
s2n_strerror(s2n_errno, "EN"), + s2n_strerror_debug(s2n_errno, "EN")); + aws_channel_shutdown(slot->channel, AWS_IO_TLS_ERROR_READ_FAILURE); + return AWS_OP_SUCCESS; }; + /* if read > 0 */ processed += read; outgoing_read_message->message_data.len = (size_t)read; @@ -1297,8 +1312,9 @@ struct aws_channel_handler *aws_tls_server_handler_new( static void s_s2n_ctx_destroy(struct s2n_ctx *s2n_ctx) { if (s2n_ctx != NULL) { - s2n_config_free(s2n_ctx->s2n_config); - + if (s2n_ctx->s2n_config) { + s2n_config_free(s2n_ctx->s2n_config); + } if (s2n_ctx->custom_cert_chain_and_key) { s2n_cert_chain_and_key_free(s2n_ctx->custom_cert_chain_and_key); } @@ -1372,20 +1388,22 @@ static struct aws_tls_ctx *s_tls_ctx_new( goto cleanup_s2n_config; } + const char *security_policy = NULL; if (options->custom_key_op_handler != NULL) { - /* PKCS#11 integration hasn't been tested with TLS 1.3, so don't use cipher preferences that allow 1.3 */ + /* When custom_key_op_handler is set, don't use security policy that allow TLS 1.3. 
+ * This hack is necessary until our PKCS#11 custom_key_op_handler supports RSA PSS */ switch (options->minimum_tls_version) { case AWS_IO_SSLv3: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "CloudFront-SSL-v-3"); + security_policy = "CloudFront-SSL-v-3"; break; case AWS_IO_TLSv1: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "CloudFront-TLS-1-0-2014"); + security_policy = "CloudFront-TLS-1-0-2014"; break; case AWS_IO_TLSv1_1: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-1-2017-01"); + security_policy = "ELBSecurityPolicy-TLS-1-1-2017-01"; break; case AWS_IO_TLSv1_2: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"); + security_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"; break; case AWS_IO_TLSv1_3: AWS_LOGF_ERROR(AWS_LS_IO_TLS, "TLS 1.3 with PKCS#11 is not supported yet."); @@ -1393,28 +1411,29 @@ static struct aws_tls_ctx *s_tls_ctx_new( goto cleanup_s2n_config; case AWS_IO_TLS_VER_SYS_DEFAULTS: default: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-1-2017-01"); + security_policy = "ELBSecurityPolicy-TLS-1-1-2017-01"; } } else { + /* No custom_key_op_handler is set, use normal security policies */ switch (options->minimum_tls_version) { case AWS_IO_SSLv3: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-SSLv3.0"); + security_policy = "AWS-CRT-SDK-SSLv3.0-2023"; break; case AWS_IO_TLSv1: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.0"); + security_policy = "AWS-CRT-SDK-TLSv1.0-2023"; break; case AWS_IO_TLSv1_1: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.1"); + security_policy = "AWS-CRT-SDK-TLSv1.1-2023"; break; case AWS_IO_TLSv1_2: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.2"); + security_policy = "AWS-CRT-SDK-TLSv1.2-2023"; break; case AWS_IO_TLSv1_3: - 
s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.3"); + security_policy = "AWS-CRT-SDK-TLSv1.3-2023"; break; case AWS_IO_TLS_VER_SYS_DEFAULTS: default: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.0"); + security_policy = "AWS-CRT-SDK-TLSv1.0-2023"; } } @@ -1423,7 +1442,7 @@ static struct aws_tls_ctx *s_tls_ctx_new( /* No-Op, if the user configured a minimum_tls_version then a version-specific Cipher Preference was set */ break; case AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05: - s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "PQ-TLS-1-0-2021-05-26"); + security_policy = "PQ-TLS-1-0-2021-05-26"; break; default: AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Unrecognized TLS Cipher Preference: %d", options->cipher_pref); @@ -1431,6 +1450,18 @@ static struct aws_tls_ctx *s_tls_ctx_new( goto cleanup_s2n_config; } + AWS_ASSERT(security_policy != NULL); + if (s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, security_policy)) { + AWS_LOGF_ERROR( + AWS_LS_IO_TLS, + "ctx: Failed setting security policy '%s' (newer S2N required?): %s (%s)", + security_policy, + s2n_strerror(s2n_errno, "EN"), + s2n_strerror_debug(s2n_errno, "EN")); + aws_raise_error(AWS_IO_TLS_CTX_ERROR); + goto cleanup_s2n_config; + } + if (aws_tls_options_buf_is_set(&options->certificate) && aws_tls_options_buf_is_set(&options->private_key)) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "ctx: Certificate and key have been set, setting them up now."); diff --git a/contrib/restricted/aws/aws-c-io/source/socket_shared.c b/contrib/restricted/aws/aws-c-io/source/socket_shared.c new file mode 100644 index 00000000000..63c640b4926 --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/source/socket_shared.c @@ -0,0 +1,75 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include <aws/io/socket.h> + +#include <aws/io/logging.h> + +/* common validation for connect() and bind() */ +static int s_socket_validate_port_for_domain(uint32_t port, enum aws_socket_domain domain) { + switch (domain) { + case AWS_SOCKET_IPV4: + case AWS_SOCKET_IPV6: + if (port > UINT16_MAX) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "Invalid port=%u for %s. Cannot exceed 65535", + port, + domain == AWS_SOCKET_IPV4 ? "IPv4" : "IPv6"); + return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); + } + break; + + case AWS_SOCKET_LOCAL: + /* port is ignored */ + break; + + case AWS_SOCKET_VSOCK: + /* any 32bit port is legal */ + break; + + default: + AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "Cannot validate port for unknown domain=%d", domain); + return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); + } + return AWS_OP_SUCCESS; +} + +int aws_socket_validate_port_for_connect(uint32_t port, enum aws_socket_domain domain) { + if (s_socket_validate_port_for_domain(port, domain)) { + return AWS_OP_ERR; + } + + /* additional validation */ + switch (domain) { + case AWS_SOCKET_IPV4: + case AWS_SOCKET_IPV6: + if (port == 0) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "Invalid port=%u for %s connections. Must use 1-65535", + port, + domain == AWS_SOCKET_IPV4 ? "IPv4" : "IPv6"); + return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); + } + break; + + case AWS_SOCKET_VSOCK: + if (port == (uint32_t)-1) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, "Invalid port for VSOCK connections. 
Cannot use VMADDR_PORT_ANY (-1U)."); + return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); + } + break; + + default: + /* no extra validation */ + break; + } + return AWS_OP_SUCCESS; +} + +int aws_socket_validate_port_for_bind(uint32_t port, enum aws_socket_domain domain) { + return s_socket_validate_port_for_domain(port, domain); +} diff --git a/contrib/restricted/aws/aws-c-io/source/stream.c b/contrib/restricted/aws/aws-c-io/source/stream.c index b8040946d23..ecc5652176e 100644 --- a/contrib/restricted/aws/aws-c-io/source/stream.c +++ b/contrib/restricted/aws/aws-c-io/source/stream.c @@ -7,6 +7,7 @@ #include <aws/common/file.h> #include <aws/io/file_utils.h> +#include <aws/io/private/tracing.h> #include <errno.h> @@ -32,7 +33,9 @@ int aws_input_stream_read(struct aws_input_stream *stream, struct aws_byte_buf * const size_t safe_buf_capacity = dest->capacity - dest->len; struct aws_byte_buf safe_buf = aws_byte_buf_from_empty_array(safe_buf_start, safe_buf_capacity); + __itt_task_begin(io_tracing_domain, __itt_null, __itt_null, tracing_input_stream_read); int read_result = stream->vtable->read(stream, &safe_buf); + __itt_task_end(io_tracing_domain); /* Ensure the implementation did not commit forbidden acts upon the buffer */ AWS_FATAL_ASSERT( @@ -287,7 +290,7 @@ struct aws_input_stream *aws_input_stream_new_from_file(struct aws_allocator *al struct aws_input_stream_file_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_input_stream_file_impl)); - impl->file = aws_fopen(file_name, "r+b"); + impl->file = aws_fopen(file_name, "rb"); if (impl->file == NULL) { goto on_error; } diff --git a/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c b/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c index aa80459b723..5c6426872c2 100644 --- a/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c +++ b/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c @@ -199,7 +199,6 @@ int aws_tls_ctx_options_init_client_mtls_with_pkcs11( int 
custom_key_result = AWS_OP_ERR; if (pkcs11_handler == NULL) { - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto finish; } @@ -441,6 +440,8 @@ int aws_tls_ctx_options_init_default_server( } int aws_tls_ctx_options_set_alpn_list(struct aws_tls_ctx_options *options, const char *alpn_list) { + aws_string_destroy(options->alpn_list); + options->alpn_list = aws_string_new_from_c_str(options->allocator, alpn_list); if (!options->alpn_list) { return AWS_OP_ERR; diff --git a/contrib/restricted/aws/aws-c-io/source/tracing.c b/contrib/restricted/aws/aws-c-io/source/tracing.c new file mode 100644 index 00000000000..680e96613b9 --- /dev/null +++ b/contrib/restricted/aws/aws-c-io/source/tracing.c @@ -0,0 +1,20 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include <aws/io/private/tracing.h> + +__itt_domain *io_tracing_domain; +__itt_string_handle *tracing_input_stream_read; +__itt_string_handle *tracing_event_loop_run_tasks; +__itt_string_handle *tracing_event_loop_event; +__itt_string_handle *tracing_event_loop_events; + +void aws_io_tracing_init(void) { + io_tracing_domain = __itt_domain_create("aws.c.io"); + tracing_input_stream_read = __itt_string_handle_create("Read:InputStream"); + tracing_event_loop_run_tasks = __itt_string_handle_create("RunTasks:EventLoop"); + tracing_event_loop_event = __itt_string_handle_create("IOEvent:EventLoop"); + tracing_event_loop_events = __itt_string_handle_create("IOEvents:EventLoop"); +} diff --git a/contrib/restricted/aws/aws-c-io/ya.make b/contrib/restricted/aws/aws-c-io/ya.make index f34f5a86f8c..a01ba6cd29a 100644 --- a/contrib/restricted/aws/aws-c-io/ya.make +++ b/contrib/restricted/aws/aws-c-io/ya.make @@ -1,17 +1,17 @@ -# Generated by devtools/yamaker from nixpkgs 22.11. +# Generated by devtools/yamaker from nixpkgs 24.05. 
LIBRARY() LICENSE( Apache-2.0 AND - Custom-Oasis-Pkcs11 + Public-Domain ) LICENSE_TEXTS(.yandex_meta/licenses.list.txt) -VERSION(0.13.21) +VERSION(0.14.7) -ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-io/archive/v0.13.21.tar.gz) +ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-io/archive/v0.14.7.tar.gz) PEERDIR( contrib/restricted/aws/aws-c-cal @@ -32,17 +32,7 @@ CFLAGS( -DAWS_IO_USE_IMPORT_EXPORT -DAWS_USE_EPOLL -DHAVE_SYSCONF - -DS2N_CLONE_SUPPORTED - -DS2N_CPUID_AVAILABLE - -DS2N_FALL_THROUGH_SUPPORTED - -DS2N_FEATURES_AVAILABLE - -DS2N_KYBER512R3_AVX2_BMI2 - -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH - -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX - -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4 - -DS2N_MADVISE_SUPPORTED - -DS2N_STACKTRACE - -DS2N___RESTRICT__SUPPORTED + -DINTEL_NO_ITTNOTIFY_API ) IF (CLANG_CL) @@ -67,24 +57,27 @@ ENDIF() SRCS( source/alpn_handler.c + source/async_stream.c source/channel.c source/channel_bootstrap.c source/event_loop.c source/exponential_backoff_retry_strategy.c + source/future.c source/host_resolver.c source/io.c source/message_pool.c - source/pem_utils.c + source/pem.c source/pkcs11_lib.c source/pkcs11_tls_op_handler.c - source/pki_utils.c source/retry_strategy.c source/socket_channel_handler.c + source/socket_shared.c source/standard_retry_strategy.c source/statistics.c source/stream.c source/tls_channel_handler.c source/tls_channel_handler_shared.c + source/tracing.c ) IF (OS_DARWIN) diff --git a/contrib/restricted/aws/aws-c-s3/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/aws-c-s3/.yandex_meta/devtools.copyrights.report index 4a2e81acd28..5bac2999410 100644 --- a/contrib/restricted/aws/aws-c-s3/.yandex_meta/devtools.copyrights.report +++ b/contrib/restricted/aws/aws-c-s3/.yandex_meta/devtools.copyrights.report @@ -45,6 +45,7 @@ BELONGS ya.make include/aws/s3/exports.h [5:5] include/aws/s3/private/s3_auto_ranged_get.h [5:5] include/aws/s3/private/s3_auto_ranged_put.h [5:5] + 
include/aws/s3/private/s3_buffer_pool.h [5:5] include/aws/s3/private/s3_checksums.h [4:4] include/aws/s3/private/s3_client_impl.h [5:5] include/aws/s3/private/s3_copy_object.h [5:5] @@ -53,14 +54,18 @@ BELONGS ya.make include/aws/s3/private/s3_list_parts.h [5:5] include/aws/s3/private/s3_meta_request_impl.h [5:5] include/aws/s3/private/s3_paginator.h [5:5] + include/aws/s3/private/s3_parallel_input_stream.h [2:2] + include/aws/s3/private/s3_platform_info.h [4:4] include/aws/s3/private/s3_request.h [5:5] include/aws/s3/private/s3_request_messages.h [5:5] include/aws/s3/private/s3_util.h [5:5] include/aws/s3/s3.h [5:5] include/aws/s3/s3_client.h [5:5] + include/aws/s3/s3express_credentials_provider.h [5:5] source/s3.c [2:2] source/s3_auto_ranged_get.c [2:2] source/s3_auto_ranged_put.c [2:2] + source/s3_buffer_pool.c [2:2] source/s3_checksum_stream.c [2:2] source/s3_chunk_stream.c [2:2] source/s3_client.c [2:2] @@ -70,5 +75,9 @@ BELONGS ya.make source/s3_list_parts.c [2:2] source/s3_meta_request.c [2:2] source/s3_paginator.c [2:2] + source/s3_parallel_input_stream.c [2:2] + source/s3_platform_info.c [2:2] + source/s3_request.c [2:2] source/s3_request_messages.c [2:2] source/s3_util.c [2:2] + source/s3express_credentials_provider.c [2:2] diff --git a/contrib/restricted/aws/aws-c-s3/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/aws-c-s3/.yandex_meta/devtools.licenses.report index a4da13944cc..e9b4c04f957 100644 --- a/contrib/restricted/aws/aws-c-s3/.yandex_meta/devtools.licenses.report +++ b/contrib/restricted/aws/aws-c-s3/.yandex_meta/devtools.licenses.report @@ -33,6 +33,19 @@ # FILE_INCLUDE - include all file data into licenses text file # ======================= +KEEP Apache-2.0 1a2162d65587b1c6b4482cab8e65b94f +BELONGS ya.make + License text: + \## License + This library is licensed under the Apache 2.0 License. 
+ Scancode info: + Original SPDX id: Apache-2.0 + Score : 100.00 + Match type : NOTICE + Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0 + Files with this license: + README.md [18:20] + KEEP Apache-2.0 314e21a036e4ea071f111cb5deff83c1 BELONGS ya.make FILE_INCLUDE NOTICE found in files: LICENSE at line 107, LICENSE at line 110, LICENSE at line 112, LICENSE at line 117, LICENSE at line 120, LICENSE at line 142 @@ -57,19 +70,6 @@ BELONGS ya.make Files with this license: CONTRIBUTING.md [61:61] -KEEP Apache-2.0 6c901454b872854c0dea3ec06b67701a -BELONGS ya.make - License text: - \## License - This library is licensed under the Apache 2.0 License. - Scancode info: - Original SPDX id: Apache-2.0 - Score : 100.00 - Match type : NOTICE - Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0 - Files with this license: - README.md [5:7] - KEEP Apache-2.0 d591512e466bb957030b8857f753349e BELONGS ya.make License text: @@ -83,6 +83,7 @@ BELONGS ya.make include/aws/s3/exports.h [6:6] include/aws/s3/private/s3_auto_ranged_get.h [6:6] include/aws/s3/private/s3_auto_ranged_put.h [6:6] + include/aws/s3/private/s3_buffer_pool.h [6:6] include/aws/s3/private/s3_checksums.h [5:5] include/aws/s3/private/s3_client_impl.h [6:6] include/aws/s3/private/s3_copy_object.h [6:6] @@ -91,14 +92,18 @@ BELONGS ya.make include/aws/s3/private/s3_list_parts.h [6:6] include/aws/s3/private/s3_meta_request_impl.h [6:6] include/aws/s3/private/s3_paginator.h [6:6] + include/aws/s3/private/s3_parallel_input_stream.h [3:3] + include/aws/s3/private/s3_platform_info.h [5:5] include/aws/s3/private/s3_request.h [6:6] include/aws/s3/private/s3_request_messages.h [6:6] include/aws/s3/private/s3_util.h [6:6] include/aws/s3/s3.h [6:6] include/aws/s3/s3_client.h [6:6] + include/aws/s3/s3express_credentials_provider.h [6:6] source/s3.c [3:3] source/s3_auto_ranged_get.c [3:3] 
source/s3_auto_ranged_put.c [3:3] + source/s3_buffer_pool.c [3:3] source/s3_checksum_stream.c [3:3] source/s3_chunk_stream.c [3:3] source/s3_client.c [3:3] @@ -108,8 +113,12 @@ BELONGS ya.make source/s3_list_parts.c [3:3] source/s3_meta_request.c [3:3] source/s3_paginator.c [3:3] + source/s3_parallel_input_stream.c [3:3] + source/s3_platform_info.c [3:3] + source/s3_request.c [3:3] source/s3_request_messages.c [3:3] source/s3_util.c [3:3] + source/s3express_credentials_provider.c [3:3] SKIP LicenseRef-scancode-generic-cla ee24fdc60600747c7d12c32055b0011d BELONGS ya.make diff --git a/contrib/restricted/aws/aws-c-s3/.yandex_meta/override.nix b/contrib/restricted/aws/aws-c-s3/.yandex_meta/override.nix index 5e3697a93fa..0cd520815f7 100644 --- a/contrib/restricted/aws/aws-c-s3/.yandex_meta/override.nix +++ b/contrib/restricted/aws/aws-c-s3/.yandex_meta/override.nix @@ -1,10 +1,10 @@ pkgs: attrs: with pkgs; with attrs; rec { - version = "0.2.8"; + version = "0.5.7"; src = fetchFromGitHub { owner = "awslabs"; repo = "aws-c-s3"; rev = "v${version}"; - hash = "sha256-kwYzsKdEy+e0GxqYcakcdwoaC2LLPZe8E7bZNrmqok0="; + hash = "sha256-zzsRYhLgJfd02fPgoZBf7n6dTfbLHarc1aQa0fx/uck="; }; } diff --git a/contrib/restricted/aws/aws-c-s3/README.md b/contrib/restricted/aws/aws-c-s3/README.md index bc46111ec50..c204d4ec394 100644 --- a/contrib/restricted/aws/aws-c-s3/README.md +++ b/contrib/restricted/aws/aws-c-s3/README.md @@ -1,6 +1,19 @@ ## AWS C S3 -C99 library implementation for communicating with the S3 service, designed for maximizing throughput on high bandwidth EC2 instances. +The AWS-C-S3 library is an asynchronous AWS S3 client focused on maximizing throughput and network utilization. + +### Key features: +- **Automatic Request Splitting**: Improves throughput by automatically splitting the request into part-sized chunks and performing parallel uploads/downloads of these chunks over multiple connections. 
There's a cap on the throughput of single S3 connection, the only way to go faster is multiple parallel connections. +- **Automatic Retries**: Increases resilience by retrying individual failed chunks of a file transfer, eliminating the need to restart transfers from scratch after an intermittent error. +- **DNS Load Balancing**: DNS resolver continuously harvests Amazon S3 IP addresses. When load is spread across the S3 fleet, overall throughput more reliable than if all connections are going to a single IP. +- **Advanced Network Management**: The client incorporates automatic request parallelization, effective timeouts and retries, and efficient connection reuse. This approach helps to maximize throughput and network utilization, and to avoid network overloads. +- **Thread Pools and Async I/O**: Avoids bottlenecks associated with single-thread processing. +- **Parallel Reads**: When uploading a large file from disk, reads from multiple parts of the file in parallel. This is faster than reading the file sequentially from beginning to end. + +### Documentation + +- [GetObject](docs/GetObject.md): A visual representation of the GetObject request flow. +- [Memory Aware Requests Execution](docs/memory_aware_request_execution.md): An in-depth guide on optimizing memory usage during request executions. ## License @@ -68,6 +81,23 @@ cmake -S aws-c-s3 -B aws-c-s3/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAK cmake --build aws-c-s3/build --target install ``` +#### Running S3 sample + +After installing all the dependencies, and building aws-c-s3, you can run the sample directly from the s3 build directory. 
+ +To download: +``` +aws-c-s3/build/samples/s3/s3 cp s3://<bucket-name>/<object-name> <download-path> --region <region> +``` +To upload: +``` +aws-c-s3/build/samples/s3/s3 cp <upload-path> s3://<bucket-name>/<object-name> --region <region> +``` +To list objects: +``` +aws-c-s3/build/samples/s3/s3 ls s3://<bucket-name> --region <region> +``` + ## Testing The unit tests require an AWS account with S3 buckets set up in a particular way. diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h index 3845b962926..3281cb6f16a 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h @@ -10,22 +10,39 @@ enum aws_s3_auto_ranged_get_request_type { AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT, - AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART, - AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_INITIAL_MESSAGE, + AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE, + AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1, }; struct aws_s3_auto_ranged_get { struct aws_s3_meta_request base; enum aws_s3_checksum_algorithm validation_algorithm; + + struct aws_string *etag; + + bool initial_message_has_start_range; + bool initial_message_has_end_range; + uint64_t initial_range_start; + uint64_t initial_range_end; + + uint64_t object_size_hint; + bool object_size_hint_available; + /* Members to only be used when the mutex in the base type is locked. */ struct { - /* The starting byte of the data that we will be retrieved from the object.*/ + /* The starting byte of the data that we will be retrieved from the object. + * (ignore this if object_range_empty) */ uint64_t object_range_start; - /* The last byte of the data that will be retrieved from the object.*/ + /* The last byte of the data that will be retrieved from the object. 
+ * (ignore this if object_range_empty) + * Note this is inclusive: https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests + * So if begin=0 and end=0 then 1 byte is being downloaded. */ uint64_t object_range_end; + uint64_t first_part_size; + /* The total number of parts that are being used in downloading the object range. Note that "part" here * currently refers to a range-get, and does not require a "part" on the service side. */ uint32_t total_num_parts; @@ -37,17 +54,17 @@ struct aws_s3_auto_ranged_get { uint32_t num_parts_checksum_validated; uint32_t object_range_known : 1; + + /* True if object_range_known, and it's found to be empty. + * If this is true, ignore object_range_start and object_range_end */ + uint32_t object_range_empty : 1; uint32_t head_object_sent : 1; uint32_t head_object_completed : 1; - uint32_t get_without_range_sent : 1; - uint32_t get_without_range_completed : 1; uint32_t read_window_warning_issued : 1; } synced_data; uint32_t initial_message_has_range_header : 1; uint32_t initial_message_has_if_match_header : 1; - - struct aws_string *etag; }; AWS_EXTERN_C_BEGIN diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h index 42b5a02f9e5..b10d83388f4 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h @@ -29,6 +29,13 @@ struct aws_s3_auto_ranged_put { struct aws_s3_meta_request_resume_token *resume_token; uint64_t content_length; + bool has_content_length; + + /* + * total_num_parts_from_content_length is calculated by content_length / part_size. + * It will be 0 if there is no content_length. + */ + uint32_t total_num_parts_from_content_length; /* Only meant for use in the update function, which is never called concurrently. 
*/ struct { @@ -40,43 +47,40 @@ struct aws_s3_auto_ranged_put { uint32_t next_part_number; } threaded_update_data; - /* - * Should only be used during prepare requests. Note: stream reads must be sequential, - * so prepare currently never runs concurrently with another prepare - */ - struct { - /* - * How many parts have been read from input steam. - * Since reads are always sequential, this is essentially the number of how many parts were read from start of - * stream. - */ - uint32_t num_parts_read_from_stream; - } prepare_data; - - /* - * Very similar to the etag_list used in complete_multipart_upload to create the XML payload. Each part will set the - * corresponding index to it's checksum result, so while the list is shared across threads each index will only be - * accessed once to initialize by the corresponding part number, and then again during the complete multipart upload - * request which will only be invoked after all other parts/threads have completed. - */ - struct aws_byte_buf *encoded_checksum_list; - /* Members to only be used when the mutex in the base type is locked. */ struct { - /* Array list of `struct aws_string *`. */ - struct aws_array_list etag_list; + /* Array list of `struct aws_s3_mpu_part_info *` + * Info about each part, that we need to remember for CompleteMultipartUpload. + * This is updated as we upload each part. + * If resuming an upload, we first call ListParts and store the details + * of previously uploaded parts here. In this case, the array may start with gaps + * (e.g. if parts 1 and 3 were previously uploaded, but not part 2). 
*/ + struct aws_array_list part_list; struct aws_s3_paginated_operation *list_parts_operation; struct aws_string *list_parts_continuation_token; - uint32_t total_num_parts; - uint32_t num_parts_sent; + /* Number of parts we've started work on */ + uint32_t num_parts_started; + /* Number of parts we've started, and we have no more work to do */ uint32_t num_parts_completed; uint32_t num_parts_successful; uint32_t num_parts_failed; + /* When content length is not known, requests are optimistically + * scheduled, below represents how many requests were scheduled and had no + * work to do*/ + uint32_t num_parts_noop; + + /* Number of parts we've started, but they're not done reading from stream yet. + * Though reads are serial (only 1 part can be reading from stream at a time) + * we may queue up more to minimize delays between each read. */ + uint32_t num_parts_pending_read; struct aws_http_headers *needed_response_headers; + /* Whether body stream is exhausted. */ + bool is_body_stream_at_end; + int list_parts_error_code; int create_multipart_upload_error_code; int complete_multipart_upload_error_code; @@ -102,12 +106,16 @@ struct aws_s3_auto_ranged_put { AWS_EXTERN_C_BEGIN -/* Creates a new auto-ranged put meta request. This will do a multipart upload in parallel when appropriate. */ +/* Creates a new auto-ranged put meta request. + * This will do a multipart upload in parallel when appropriate. + * Note: if has_content_length is false, content_length and num_parts are ignored. 
+ */ AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new( struct aws_allocator *allocator, struct aws_s3_client *client, size_t part_size, + bool has_content_length, uint64_t content_length, uint32_t num_parts, const struct aws_s3_meta_request_options *options); diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_buffer_pool.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_buffer_pool.h new file mode 100644 index 00000000000..43d54386477 --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_buffer_pool.h @@ -0,0 +1,136 @@ +#ifndef AWS_S3_BUFFER_ALLOCATOR_H +#define AWS_S3_BUFFER_ALLOCATOR_H + +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include <aws/s3/s3.h> + +/* + * S3 buffer pool. + * Buffer pool used for pooling part sized buffers for Put/Get operations. + * Provides additional functionally for limiting overall memory used. + * High-level buffer pool usage flow: + * - Create buffer with overall memory limit and common buffer size, aka chunk + * size (typically part size configured on client) + * - For each request: + * -- call reserve to acquire ticket for future buffer acquisition. this will + * mark memory reserved, but would not allocate it. if reserve call hits + * memory limit, it fails and reservation hold is put on the whole buffer + * pool. (aws_s3_buffer_pool_remove_reservation_hold can be used to remove + * reservation hold). + * -- once request needs memory, it can exchange ticket for a buffer using + * aws_s3_buffer_pool_acquire_buffer. this operation never fails, even if it + * ends up going over memory limit. + * -- buffer lifetime is tied to the ticket. so once request is done with the + * buffer, ticket is released and buffer returns back to the pool. 
+ */ + +AWS_EXTERN_C_BEGIN + +struct aws_s3_buffer_pool; +struct aws_s3_buffer_pool_ticket; + +struct aws_s3_buffer_pool_usage_stats { + /* Effective Max memory limit. Memory limit value provided during construction minus + * buffer reserved for overhead of the pool */ + size_t mem_limit; + + /* Max size of buffer to be allocated from primary. */ + size_t primary_cutoff; + + /* How much mem is used in primary storage. includes memory used by blocks + * that are waiting on all allocs to release before being put back in circulation. */ + size_t primary_used; + /* Overall memory allocated for blocks. */ + size_t primary_allocated; + /* Reserved memory. Does not account for how that memory will map into + * blocks and in practice can be lower than used memory. */ + size_t primary_reserved; + /* Number of blocks allocated in primary. */ + size_t primary_num_blocks; + + /* Secondary mem used. Accurate, maps directly to base allocator. */ + size_t secondary_used; + /* Secondary mem reserved. Accurate, maps directly to base allocator. */ + size_t secondary_reserved; +}; + +/* + * Create new buffer pool. + * chunk_size - specifies the size of memory that will most commonly be acquired + * from the pool (typically part size). + * mem_limit - limit on how much mem buffer pool can use. once limit is hit, + * buffers can no longer be reserved from (reservation hold is placed on the pool). + * Returns buffer pool pointer on success and NULL on failure. + */ +AWS_S3_API struct aws_s3_buffer_pool *aws_s3_buffer_pool_new( + struct aws_allocator *allocator, + size_t chunk_size, + size_t mem_limit); + +/* + * Destroys buffer pool. + * Does nothing if buffer_pool is NULL. + */ +AWS_S3_API void aws_s3_buffer_pool_destroy(struct aws_s3_buffer_pool *buffer_pool); + +/* + * Reserves memory from the pool for later use. + * Best effort and can potentially reserve memory slightly over the limit. 
+ * Reservation takes some memory out of the available pool, but does not + * allocate it right away. + * On success ticket will be returned. + * On failure NULL is returned, error is raised and reservation hold is placed + * on the buffer. Any further reservations while hold is active will fail. + * Remove reservation hold to unblock reservations. + */ +AWS_S3_API struct aws_s3_buffer_pool_ticket *aws_s3_buffer_pool_reserve( + struct aws_s3_buffer_pool *buffer_pool, + size_t size); + +/* + * Whether pool has a reservation hold. + */ +AWS_S3_API bool aws_s3_buffer_pool_has_reservation_hold(struct aws_s3_buffer_pool *buffer_pool); + +/* + * Remove reservation hold on pool. + */ +AWS_S3_API void aws_s3_buffer_pool_remove_reservation_hold(struct aws_s3_buffer_pool *buffer_pool); + +/* + * Trades in the ticket for a buffer. + * Cannot fail and can over allocate above mem limit if reservation was not accurate. + * Using the same ticket twice will return the same buffer. + * Buffer is only valid until the ticket is released. + */ +AWS_S3_API struct aws_byte_buf aws_s3_buffer_pool_acquire_buffer( + struct aws_s3_buffer_pool *buffer_pool, + struct aws_s3_buffer_pool_ticket *ticket); + +/* + * Releases the ticket. + * Any buffers associated with the ticket are invalidated. + */ +AWS_S3_API void aws_s3_buffer_pool_release_ticket( + struct aws_s3_buffer_pool *buffer_pool, + struct aws_s3_buffer_pool_ticket *ticket); + +/* + * Get pool memory usage stats. + */ +AWS_S3_API struct aws_s3_buffer_pool_usage_stats aws_s3_buffer_pool_get_usage(struct aws_s3_buffer_pool *buffer_pool); + +/* + * Trims all unused mem from the pool. + * Warning: fairly slow operation, do not use in critical path. + * TODO: partial trimming? ex. only trim down to 50% of max? 
+ */ +AWS_S3_API void aws_s3_buffer_pool_trim(struct aws_s3_buffer_pool *buffer_pool); + +AWS_EXTERN_C_END + +#endif /* AWS_S3_BUFFER_ALLOCATOR_H */ diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_client_impl.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_client_impl.h index ec0ff66c9eb..b2b5154e604 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_client_impl.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_client_impl.h @@ -59,7 +59,7 @@ struct aws_s3_endpoint_options { uint32_t max_connections; /* HTTP port override. If zero, determine port based on TLS context */ - uint16_t port; + uint32_t port; /** * Optional. @@ -162,12 +162,42 @@ struct aws_s3_client_vtable { void (*endpoint_shutdown_callback)(struct aws_s3_client *client); void (*finish_destroy)(struct aws_s3_client *client); + + struct aws_parallel_input_stream *( + *parallel_input_stream_new_from_file)(struct aws_allocator *allocator, struct aws_byte_cursor file_name); +}; + +struct aws_s3_upload_part_timeout_stats { + bool stop_timeout; + + /* Total number of successful upload requests */ + uint64_t num_successful_upload_requests; + + /* Stats for the request time of first 10 succeed requests */ + struct { + uint64_t sum_ns; + uint64_t num_samples; + } initial_request_time; + + /* Track the timeout rate. */ + struct { + uint64_t num_completed; + uint64_t num_failed; + } timeout_rate_tracking; + + /* Stats for the response to first byte time of tracked succeed requests */ + struct { + uint64_t sum_ns; + uint64_t num_samples; + } response_to_first_byte_time; }; /* Represents the state of the S3 client. */ struct aws_s3_client { struct aws_allocator *allocator; + struct aws_s3_buffer_pool *buffer_pool; + struct aws_s3_client_vtable *vtable; struct aws_ref_count ref_count; @@ -190,7 +220,13 @@ struct aws_s3_client { /* Size of parts for files when doing gets or puts. 
This exists on the client as configurable option that is passed * to meta requests for use. */ - const size_t max_part_size; + const uint64_t max_part_size; + + /* The size threshold in bytes for when to use multipart uploads for a AWS_S3_META_REQUEST_TYPE_PUT_OBJECT meta + * request. Uploads over this size will automatically use a multipart upload strategy, while uploads smaller or + * equal to this threshold will use a single request to upload the whole object. If not set, `part_size` will be + * used as threshold. */ + const uint64_t multipart_upload_threshold; /* TLS Options to be used for each connection. */ struct aws_tls_connection_options *tls_connection_options; @@ -198,11 +234,16 @@ struct aws_s3_client { /* Cached signing config. Can be NULL if no signing config was specified. */ struct aws_cached_signing_config_aws *cached_signing_config; + /* The auth provider for S3 Express. */ + aws_s3express_provider_factory_fn *s3express_provider_factory; + void *factory_user_data; + struct aws_s3express_credentials_provider *s3express_provider; + /* Throughput target in Gbps that we are trying to reach. */ const double throughput_target_gbps; - /* The calculated ideal number of VIP's based on throughput target and throughput per vip. */ - const uint32_t ideal_vip_count; + /* The calculated ideal number of HTTP connections, based on throughput target and throughput per connection. */ + const uint32_t ideal_connection_count; /** * For multi-part upload, content-md5 will be calculated if the AWS_MR_CONTENT_MD5_ENABLED is specified @@ -267,6 +308,11 @@ struct aws_s3_client { * Ignored unless `enable_read_backpressure` is true. */ const size_t initial_read_window; + /** + * Timeout in ms for upload request for request after sending to the response first byte received. + */ + struct aws_atomic_var upload_timeout_ms; + struct { /* Number of overall requests currently being processed by the client. 
*/ struct aws_atomic_var num_requests_in_flight; @@ -277,8 +323,8 @@ struct aws_s3_client { /* Number of requests sitting in their meta request priority queue, waiting to be streamed. */ struct aws_atomic_var num_requests_stream_queued_waiting; - /* Number of requests currently scheduled to be streamed or are actively being streamed. */ - struct aws_atomic_var num_requests_streaming; + /* Number of requests currently scheduled to be streamed the response body or are actively being streamed. */ + struct aws_atomic_var num_requests_streaming_response; } stats; struct { @@ -301,6 +347,12 @@ struct aws_s3_client { /* Task for processing requests from meta requests on connections. */ struct aws_task process_work_task; + /* Task for trimming buffer bool. */ + struct aws_task trim_buffer_pool_task; + + /* Task to cleanup endpoints */ + struct aws_task endpoints_cleanup_task; + /* Number of endpoints currently allocated. Used during clean up to know how many endpoints are still in * memory.*/ uint32_t num_endpoints_allocated; @@ -321,9 +373,16 @@ struct aws_s3_client { * shutdown callback has not yet been called.*/ uint32_t body_streaming_elg_allocated : 1; + /* Whether or not a S3 Express provider is active with the client.*/ + uint32_t s3express_provider_active : 1; + /* True if client has been flagged to finish destroying itself. Used to catch double-destroy bugs.*/ uint32_t finish_destroy : 1; + /* Whether or not endpoints cleanup task is currently scheduled. */ + uint32_t endpoints_cleanup_task_scheduled : 1; + + struct aws_s3_upload_part_timeout_stats upload_part_stats; } synced_data; struct { @@ -339,6 +398,8 @@ struct aws_s3_client { /* Number of requests currently being prepared. */ uint32_t num_requests_being_prepared; + /* Whether or not work processing is currently scheduled. 
*/ + uint32_t trim_buffer_pool_task_scheduled : 1; } threaded_data; }; @@ -428,11 +489,23 @@ struct aws_s3_endpoint *aws_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint * from the client's hashtable) */ void aws_s3_endpoint_release(struct aws_s3_endpoint *endpoint); +/* + * Destroys the endpoint. Before calling this function, the endpoint must be removed from the Client's hash table, and + * its ref count must be zero. You MUST NOT call this while the client's lock is held. + */ +void aws_s3_endpoint_destroy(struct aws_s3_endpoint *endpoint); + AWS_S3_API -extern const uint32_t g_max_num_connections_per_vip; +extern const uint32_t g_min_num_connections; AWS_S3_API -extern const uint32_t g_num_conns_per_vip_meta_request_look_up[]; +extern const size_t g_expect_timeout_offset_ms; + +AWS_S3_API +void aws_s3_client_update_upload_part_timeout( + struct aws_s3_client *client, + struct aws_s3_request *finished_upload_part_request, + int finished_error_code); AWS_EXTERN_C_END diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_copy_object.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_copy_object.h index a839e1fcc48..490a927cc55 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_copy_object.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_copy_object.h @@ -22,7 +22,7 @@ enum aws_s3_copy_object_request_tag { struct aws_s3_copy_object { struct aws_s3_meta_request base; - /* Useable after the Create Multipart Upload request succeeds. */ + /* Usable after the Create Multipart Upload request succeeds. */ struct aws_string *upload_id; /* Only meant for use in the update function, which is never called concurrently. */ @@ -32,7 +32,10 @@ struct aws_s3_copy_object { /* Members to only be used when the mutex in the base type is locked. */ struct { - struct aws_array_list etag_list; + /* Array-list of `struct aws_s3_mpu_part_info *`. 
+ * If copying via multipart upload, we fill in this info as each part gets copied, + * and it's used to generate the final CompleteMultipartUpload. */ + struct aws_array_list part_list; /* obtained through a HEAD request against the source object */ uint64_t content_length; diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h index 123c963b591..4ce93f91abb 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h @@ -15,6 +15,12 @@ struct aws_s3_meta_request_default { size_t content_length; + /* Actual type for the single request (may be AWS_S3_REQUEST_TYPE_UNKNOWN) */ + enum aws_s3_request_type request_type; + + /* S3 operation name for the single request (NULL if unknown) */ + struct aws_string *operation_name; + /* Members to only be used when the mutex in the base type is locked. */ struct { int cached_response_status; @@ -30,6 +36,7 @@ struct aws_s3_meta_request_default { struct aws_s3_meta_request *aws_s3_meta_request_default_new( struct aws_allocator *allocator, struct aws_s3_client *client, + enum aws_s3_request_type request_type, uint64_t content_length, bool should_compute_content_md5, const struct aws_s3_meta_request_options *options); diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_objects.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_objects.h index e0b5d0cd0f9..ada72e6dfbd 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_objects.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_objects.h @@ -44,7 +44,7 @@ struct aws_s3_object_info { * Invoked when an object or prefix is encountered during a ListObjectsV2 API call. Return false, to immediately * terminate the list operation. 
Returning true will continue until at least the current page is iterated. */ -typedef bool(aws_s3_on_object_fn)(const struct aws_s3_object_info *info, void *user_data); +typedef int(aws_s3_on_object_fn)(const struct aws_s3_object_info *info, void *user_data); /** * Invoked upon the complete fetch and parsing of a page. If error_code is AWS_OP_SUCCESS and @@ -112,10 +112,6 @@ AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_list_objects( struct aws_allocator *allocator, const struct aws_s3_list_objects_params *params); -AWS_S3_API struct aws_s3_paginated_operation *aws_s3_list_objects_operation_new( - struct aws_allocator *allocator, - const struct aws_s3_list_objects_params *params); - AWS_EXTERN_C_END #endif /* AWS_S3_LIST_OBJECTS_H */ diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_parts.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_parts.h index 30af99f3c4e..63e4062a559 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_parts.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_parts.h @@ -54,13 +54,15 @@ struct aws_s3_part_info { }; /** - * Invoked when a part is encountered during ListParts call. Return false, to immediately - * terminate the list operation. Returning true will continue until at least the current page is iterated. + * Invoked when a part is encountered during ListParts call. + * Return AWS_OP_ERR (after an error has been raised) to fail the list operation. + * Return AWS_OP_SUCCESS to continue until at least the current page is iterated. */ -typedef bool(aws_s3_on_part_fn)(const struct aws_s3_part_info *info, void *user_data); +typedef int(aws_s3_on_part_fn)(const struct aws_s3_part_info *info, void *user_data); /** - * Parameters for calling aws_s3_initiate_list_parts(). All values are copied out or re-seated and reference counted. + * Parameters for calling aws_s3_list_parts_operation_new(). 
All values are copied out or re-seated and reference + * counted. */ struct aws_s3_list_parts_params { /** @@ -88,10 +90,6 @@ struct aws_s3_list_parts_params { */ aws_s3_on_part_fn *on_part; /** - * Callback to invoke when each page of the bucket listing completes. - */ - aws_s3_on_page_finished_fn *on_list_finished; - /** * Associated user data. */ void *user_data; @@ -99,22 +97,6 @@ struct aws_s3_list_parts_params { AWS_EXTERN_C_BEGIN -/** - * Initiates a list objects command (without executing it), and returns a paginator object to iterate the bucket with if - * successful. - * - * Returns NULL on failure. Check aws_last_error() for details on the error that occurred. - * - * this is a reference counted object. It is returned with a reference count of 1. You must call - * aws_s3_paginator_release() on this object when you are finished with it. - * - * This does not start the actual list operation. You need to call aws_s3_paginator_continue() to start - * the operation. - */ -AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_list_parts( - struct aws_allocator *allocator, - const struct aws_s3_list_parts_params *params); - AWS_S3_API struct aws_s3_paginated_operation *aws_s3_list_parts_operation_new( struct aws_allocator *allocator, const struct aws_s3_list_parts_params *params); diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h index 1f616419212..3075973dc32 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h @@ -22,7 +22,6 @@ struct aws_s3_client; struct aws_s3_connection; struct aws_s3_meta_request; struct aws_s3_request; -struct aws_s3_request_options; struct aws_http_headers; struct aws_http_make_request_options; struct aws_retry_strategy; @@ -45,10 +44,40 @@ typedef 
void(aws_s3_meta_request_prepare_request_callback_fn)( void *user_data); struct aws_s3_prepare_request_payload { + struct aws_allocator *allocator; struct aws_s3_request *request; + struct aws_task task; + /* async step: wait for vtable->prepare_request() call to complete */ + struct aws_future_void *asyncstep_prepare_request; + /* callback to invoke when all request preparation work is complete */ aws_s3_meta_request_prepare_request_callback_fn *callback; void *user_data; - struct aws_task task; +}; + +/* An event to be delivered on the meta-request's io_event_loop thread. */ +struct aws_s3_meta_request_event { + enum aws_s3_meta_request_event_type { + AWS_S3_META_REQUEST_EVENT_RESPONSE_BODY, /* body_callback */ + AWS_S3_META_REQUEST_EVENT_PROGRESS, /* progress_callback */ + AWS_S3_META_REQUEST_EVENT_TELEMETRY, /* telemetry_callback */ + } type; + + union { + /* data for AWS_S3_META_REQUEST_EVENT_RESPONSE_BODY */ + struct { + struct aws_s3_request *completed_request; + } response_body; + + /* data for AWS_S3_META_REQUEST_EVENT_PROGRESS */ + struct { + struct aws_s3_meta_request_progress info; + } progress; + + /* data for AWS_S3_META_REQUEST_EVENT_TELEMETRY */ + struct { + struct aws_s3_request_metrics *metrics; + } telemetry; + } u; }; struct aws_s3_meta_request_vtable { @@ -56,15 +85,19 @@ struct aws_s3_meta_request_vtable { * progress, false if there is not. */ bool (*update)(struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request); + /* Run vtable->prepare_request() on the meta-request's event loop. + * We do this because body streaming is slow, and we don't want it on our networking threads. + * The callback may fire on any thread (an async sub-step may run on another thread). 
*/ void (*schedule_prepare_request)( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_s3_meta_request_prepare_request_callback_fn *callback, void *user_data); - /* Given a request, prepare it for sending (ie: creating the correct HTTP message, reading from a stream (if - * necessary), signing it, computing hashes, etc.) */ - int (*prepare_request)(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request); + /* Given a request, asynchronously prepare it for sending + * (creating the correct HTTP message, reading from a stream (if necessary), computing hashes, etc.). + * Returns a future, which may complete on any thread (and may complete synchronously). */ + struct aws_future_void *(*prepare_request)(struct aws_s3_request *request); void (*init_signing_date_time)(struct aws_s3_meta_request *meta_request, struct aws_date_time *date_time); @@ -107,6 +140,15 @@ struct aws_s3_meta_request { /* Initial HTTP Message that this meta request is based on. */ struct aws_http_message *initial_request_message; + /* The meta request's outgoing body comes from one of these: + * 1) request_body_async_stream: if set, then async stream 1 part at a time + * 2) request_body_parallel_stream: if set, then stream multiple parts in parallel + * 3) request_body_using_async_writes: if set, then synchronously copy async_write data from 1 part at a time + * 4) initial_request_message's body_stream: else synchronously stream parts */ + struct aws_async_input_stream *request_body_async_stream; + struct aws_parallel_input_stream *request_body_parallel_stream; + bool request_body_using_async_writes; + /* Part size to use for uploads and downloads. Passed down by the creating client. */ const size_t part_size; @@ -118,7 +160,7 @@ struct aws_s3_meta_request { struct aws_s3_endpoint *endpoint; - /* Event loop to schedule IO work related on, ie, reading from streams, streaming parts back to the caller, etc.. 
+ /* Event loop to schedule IO work related on, ie, reading from streams, streaming parts back to the caller, etc... * After the meta request is finished, this will be reset along with the client reference.*/ struct aws_event_loop *io_event_loop; @@ -131,6 +173,8 @@ struct aws_s3_meta_request { aws_s3_meta_request_finish_fn *finish_callback; aws_s3_meta_request_shutdown_fn *shutdown_callback; aws_s3_meta_request_progress_fn *progress_callback; + aws_s3_meta_request_telemetry_fn *telemetry_callback; + aws_s3_meta_request_upload_review_fn *upload_review_callback; /* Customer specified callbacks to be called by our specialized callback to calculate the response checksum. */ aws_s3_meta_request_headers_callback_fn *headers_user_callback_after_checksum; @@ -138,6 +182,7 @@ struct aws_s3_meta_request { aws_s3_meta_request_finish_fn *finish_user_callback_after_checksum; enum aws_s3_meta_request_type type; + struct aws_string *s3express_session_host; struct { struct aws_mutex lock; @@ -152,8 +197,8 @@ struct aws_s3_meta_request { /* The sum of initial_read_window, plus all window_increment() calls. This number never goes down. */ uint64_t read_window_running_total; - /* The next expected streaming part number needed to continue streaming part bodies. (For example, this will - * initially be 1 for part 1, and after that part is received, it will be 2, then 3, etc.. */ + /* The next expected streaming part number needed to continue streaming part bodies. (For example, this will + * initially be 1 for part 1, and after that part is received, it will be 2, then 3, etc.. )*/ uint32_t next_streaming_part; /* Number of parts scheduled for delivery. */ @@ -163,11 +208,19 @@ struct aws_s3_meta_request { * failed.)*/ uint32_t num_parts_delivery_completed; - /* Number of parts that have been successfully delivered to the caller. */ - uint32_t num_parts_delivery_succeeded; + /* Task for delivering events on the meta-request's io_event_loop thread. 
+ * We do this to ensure a meta-request's callbacks are fired sequentially and non-overlapping. + * If `event_delivery_array` has items in it, then this task is scheduled. + * If `event_delivery_active` is true, then this task is actively running. + * Delivery is not 100% complete until `event_delivery_array` is empty AND `event_delivery_active` is false + * (use aws_s3_meta_request_are_events_out_for_delivery_synced() to check) */ + struct aws_task event_delivery_task; - /* Number of parts that have failed while trying to be delivered to the caller. */ - uint32_t num_parts_delivery_failed; + /* Array of `struct aws_s3_meta_request_event` to deliver when the `event_delivery_task` runs. */ + struct aws_array_list event_delivery_array; + + /* When true, events are actively being delivered to the user. */ + bool event_delivery_active; /* The end finish result of the meta request. */ struct aws_s3_meta_request_result finish_result; @@ -175,6 +228,38 @@ struct aws_s3_meta_request { /* True if the finish result has been set. */ uint32_t finish_result_set : 1; + /* To track aws_s3_requests with cancellable HTTP streams */ + struct aws_linked_list cancellable_http_streams_list; + + /* Data for async-writes. + * Currently, for a given meta request, only 1 async-write is allowed at a time. + * + * When the user calls write(), they may not provide enough data for us to send an UploadPart. + * In that case, we copy the data to a buffer and immediately mark the write complete, + * so the user can write more data, so we finally get enough to send. */ + struct { + /* The future for whatever async-write is pending. + * If this is NULL, there isn't enough data to send another part. + * + * If this is non-NULL, 1+ part requests can be sent. + * When all the data has been processed, this future is completed + * and cleared, and we can accept another write() call. 
*/ + struct aws_future_void *future; + + /* True once user passes `eof` to their final write() call */ + bool eof; + + /* Holds buffered data we can't immediately send. + * The length will always be less than part-size */ + struct aws_byte_buf buffered_data; + + /* Cursor/pointer to data from the most-recent write() call, which + * provides enough data (combined with any buffered_data) to send 1+ parts. + * If there's data leftover in unbuffered_cursor after these parts are sent, + * it's copied into buffered_data, and we wait for more writes... */ + struct aws_byte_cursor unbuffered_cursor; + } async_write; + } synced_data; /* Anything in this structure should only ever be accessed by the client on its process work event loop task. */ @@ -189,6 +274,14 @@ struct aws_s3_meta_request { } client_process_work_threaded_data; + /* Anything in this structure should only ever be accessed by the meta-request from its io_event_loop thread. */ + struct { + /* When delivering events, we swap contents with `synced_data.event_delivery_array`. + * This is an optimization, we could have just copied the array when the task runs, + * but swapping two array-lists back and forth avoids an allocation. */ + struct aws_array_list event_delivery_array; + } io_threaded_data; + const bool should_compute_content_md5; /* deep copy of the checksum config. 
*/ @@ -197,10 +290,18 @@ struct aws_s3_meta_request { /* checksum found in either a default get request, or in the initial head request of a multipart get */ struct aws_byte_buf meta_request_level_response_header_checksum; - /* running checksum of all of the parts of a default get, or ranged get meta request*/ + /* running checksum of all the parts of a default get, or ranged get meta request*/ struct aws_s3_checksum *meta_request_level_running_response_sum; }; +/* Info for each part, that we need to remember until we send CompleteMultipartUpload */ +struct aws_s3_mpu_part_info { + uint64_t size; + struct aws_string *etag; + struct aws_byte_buf checksum_base64; + bool was_previously_uploaded; +}; + AWS_EXTERN_C_BEGIN /* Initialize the base meta request structure. */ @@ -271,13 +372,6 @@ void aws_s3_meta_request_send_request_finish_default( struct aws_http_stream *stream, int error_code); -/* Implementation for when a request finishes a particular send to handle possible async error from S3. */ -AWS_S3_API -void aws_s3_meta_request_send_request_finish_handle_async_error( - struct aws_s3_connection *connection, - struct aws_http_stream *stream, - int error_code); - /* Called by the client when a request is completely finished and not doing any further retries. */ AWS_S3_API void aws_s3_meta_request_finished_request( @@ -287,16 +381,40 @@ void aws_s3_meta_request_finished_request( /* Called to place the request in the meta request's priority queue for streaming back to the caller. Once all requests * with a part number less than the given request has been received, the given request and the previous requests will - * scheduled for streaming. */ + * be scheduled for streaming. */ AWS_S3_API void aws_s3_meta_request_stream_response_body_synced( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request); -/* Read from the meta request's input stream. 
Should always be done outside of any mutex, as reading from the stream - * could cause user code to call back into aws-c-s3.*/ +/* Add an event for delivery on the meta-request's io_event_loop thread. + * These events usually correspond to callbacks that must fire sequentially and non-overlapping, + * such as delivery of a part's response body. */ +void aws_s3_meta_request_add_event_for_delivery_synced( + struct aws_s3_meta_request *meta_request, + const struct aws_s3_meta_request_event *event); + +/* Returns whether any events are out for delivery. + * The meta-request's finish callback must not be invoked until this returns false. */ +bool aws_s3_meta_request_are_events_out_for_delivery_synced(struct aws_s3_meta_request *meta_request); + +/* Cancel the requests with cancellable HTTP stream for the meta request */ +void aws_s3_meta_request_cancel_cancellable_requests_synced(struct aws_s3_meta_request *meta_request, int error_code); + +/* Asynchronously read from the meta request's input stream. Should always be done outside of any mutex, + * as reading from the stream could cause user code to call back into aws-c-s3. + * This will fill the buffer to capacity, unless end of stream is reached. + * It may read from the underlying stream multiple times, if that's what it takes to fill the buffer. + * Returns a future whose result bool indicates whether end of stream was reached. + * This future may complete on any thread, and may complete synchronously. + * + * Read from offset to fill the buffer + */ AWS_S3_API -int aws_s3_meta_request_read_body(struct aws_s3_meta_request *meta_request, struct aws_byte_buf *buffer); +struct aws_future_bool *aws_s3_meta_request_read_body( + struct aws_s3_meta_request *meta_request, + uint64_t offset, + struct aws_byte_buf *buffer); /* Set the meta request finish result as failed. This is meant to be called sometime before aws_s3_meta_request_finish. 
* Subsequent calls to this function or to aws_s3_meta_request_set_success_synced will not overwrite the end result of @@ -332,7 +450,7 @@ AWS_S3_API void aws_s3_meta_request_result_setup( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_result *result, - struct aws_s3_request *request, + struct aws_s3_request *failed_request, int response_status, int error_code); diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_paginator.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_paginator.h index f51e9b8f669..d1a5a3f3f45 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_paginator.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_paginator.h @@ -31,8 +31,7 @@ typedef int(aws_s3_next_http_message_fn)( void *user_data, struct aws_http_message **out_message); -typedef bool( - aws_s3_on_result_node_encountered_fn)(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data); +typedef int(aws_s3_on_result_node_encountered_fn)(struct aws_xml_node *node, void *user_data); typedef void(aws_s3_on_page_finished_fn)(struct aws_s3_paginator *paginator, int error_code, void *user_data); @@ -84,14 +83,14 @@ struct aws_s3_paginator_params { */ struct aws_s3_paginated_operation_params { /** - * Name of the top level result node. Must not be NULL. + * Name of the top level result node. Must not be empty. */ - const struct aws_byte_cursor *result_xml_node_name; + struct aws_byte_cursor result_xml_node_name; /** - * Name of the continuation token node. Must not be NULL. + * Name of the continuation token node. Must not be empty. */ - const struct aws_byte_cursor *continuation_token_node_name; + struct aws_byte_cursor continuation_token_node_name; /** * Function to generate next message. 
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_parallel_input_stream.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_parallel_input_stream.h new file mode 100644 index 00000000000..de7fa814a28 --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_parallel_input_stream.h @@ -0,0 +1,105 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#ifndef AWS_S3_PARALLEL_INPUT_STREAM_H +#define AWS_S3_PARALLEL_INPUT_STREAM_H + +#include <aws/s3/s3.h> + +#include <aws/common/ref_count.h> + +AWS_PUSH_SANE_WARNING_LEVEL + +struct aws_byte_buf; +struct aws_future_bool; +struct aws_input_stream; + +struct aws_event_loop_group; + +struct aws_parallel_input_stream { + const struct aws_parallel_input_stream_vtable *vtable; + struct aws_allocator *alloc; + struct aws_ref_count ref_count; + + void *impl; +}; + +struct aws_parallel_input_stream_vtable { + /** + * Destroy the stream, its refcount has reached 0. + */ + void (*destroy)(struct aws_parallel_input_stream *stream); + + /** + * Read into the buffer in parallel. + * The implementation needs to support this to be invoked concurrently from multiple threads + */ + struct aws_future_bool *( + *read)(struct aws_parallel_input_stream *stream, uint64_t offset, struct aws_byte_buf *dest); +}; + +AWS_EXTERN_C_BEGIN + +/** + * Initialize aws_parallel_input_stream "base class" + */ +AWS_S3_API +void aws_parallel_input_stream_init_base( + struct aws_parallel_input_stream *stream, + struct aws_allocator *alloc, + const struct aws_parallel_input_stream_vtable *vtable, + void *impl); + +/** + * Increment reference count. + * You may pass in NULL (has no effect). + * Returns whatever pointer was passed in. + */ +AWS_S3_API +struct aws_parallel_input_stream *aws_parallel_input_stream_acquire(struct aws_parallel_input_stream *stream); + +/** + * Decrement reference count. 
+ * You may pass in NULL (has no effect). + * Always returns NULL. + */ +AWS_S3_API +struct aws_parallel_input_stream *aws_parallel_input_stream_release(struct aws_parallel_input_stream *stream); + +/** + * Read from the offset until fill the dest, or EOF reached. + * It's thread safe to be called from multiple threads without waiting for other read to complete + * + * @param stream The stream to read from + * @param offset The offset in the stream from beginning to start reading + * @param dest The output buffer read to + * @return a future, which will contain an error code if something went wrong, + * or a result bool indicating whether EOF has been reached. + */ +AWS_S3_API +struct aws_future_bool *aws_parallel_input_stream_read( + struct aws_parallel_input_stream *stream, + uint64_t offset, + struct aws_byte_buf *dest); + +/** + * Create a new file based parallel input stream. + * + * This implementation will open a file handler when the read happens, and seek to the offset to start reading. Close + * the file handler as read finishes. + * + * @param allocator memory allocator + * @param file_name The file path to read from + * @return aws_parallel_input_stream + */ +AWS_S3_API +struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file( + struct aws_allocator *allocator, + struct aws_byte_cursor file_name); + +AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_S3_PARALLEL_INPUT_STREAM_H */ diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_platform_info.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_platform_info.h new file mode 100644 index 00000000000..9e967128264 --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_platform_info.h @@ -0,0 +1,80 @@ +#ifndef AWS_S3_S3_PLATFORM_INFO_H +#define AWS_S3_S3_PLATFORM_INFO_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include <aws/s3/s3.h> + +struct aws_s3_platform_info_loader; + +AWS_EXTERN_C_BEGIN + +/** + * Initializes and returns a loader for querying the compute platform for information needed for making configuration + * decisions. + * This will never be NULL. + */ +AWS_S3_API +struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_new(struct aws_allocator *allocator); + +AWS_S3_API +struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_acquire(struct aws_s3_platform_info_loader *loader); + +AWS_S3_API +struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_release(struct aws_s3_platform_info_loader *loader); + +/** + * Retrieves the pre-configured metadata for a given ec2 instance type. If no such pre-configuration exists, returns + * NULL. + */ +AWS_S3_API +const struct aws_s3_platform_info *aws_s3_get_platform_info_for_instance_type( + struct aws_s3_platform_info_loader *loader, + struct aws_byte_cursor instance_type_name); + +/** + * Retrieves the metadata for the current environment. If EC2 instance type is unknown, or it is not an EC2 instance at + * all, this value will still include the information about the system that could be determined. This value will never + * be NULL. + * This API is not thread safe. + */ +AWS_S3_API +const struct aws_s3_platform_info *aws_s3_get_platform_info_for_current_environment( + struct aws_s3_platform_info_loader *loader); + +/* + * Retrieves a list of EC2 instance types with recommended configuration. + * Returns aws_array_list<aws_byte_cursor>. The caller is responsible for cleaning up the array list. + */ +AWS_S3_API +struct aws_array_list aws_s3_get_recommended_platforms(struct aws_s3_platform_info_loader *loader); + +/** + * Returns true if the current process is running on an Amazon EC2 instance powered by Nitro. 
+ */ +AWS_S3_API +bool aws_s3_is_running_on_ec2_nitro(struct aws_s3_platform_info_loader *loader); + +/** + * Returns an EC2 instance type assuming this executable is running on Amazon EC2 powered by nitro. + * + * First this function will check it's running on EC2 via. attempting to read DMI info to avoid making IMDS calls. + * + * If the function detects it's on EC2, and it was able to detect the instance type without a call to IMDS + * it will return it. + * + * Finally, it will call IMDS and return the instance type from there. + * + * Note that in the case of the IMDS call, a new client stack is spun up using 1 background thread. The call is made + * synchronously with a 1 second timeout: It's not cheap. To make this easier, the underlying result is cached + * internally and will be freed when aws_s3_library_clean_up() is called. + * @return byte_cursor containing the instance type. If this is empty, the instance type could not be determined. + */ +AWS_S3_API +struct aws_byte_cursor aws_s3_get_ec2_instance_type(struct aws_s3_platform_info_loader *loader, bool cached_only); + +AWS_EXTERN_C_END + +#endif /* AWS_S3_S3_PLATFORM_INFO_H */ diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request.h index aed5b1b3955..b77cf5231e1 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request.h @@ -9,8 +9,10 @@ #include <aws/common/byte_buf.h> #include <aws/common/linked_list.h> #include <aws/common/ref_count.h> +#include <aws/common/thread.h> #include <aws/s3/s3.h> +#include <aws/s3/private/s3_buffer_pool.h> #include <aws/s3/private/s3_checksums.h> struct aws_http_message; @@ -21,6 +23,88 @@ enum aws_s3_request_flags { AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS = 0x00000001, AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY = 0x00000002, AWS_S3_REQUEST_FLAG_ALWAYS_SEND = 0x00000004, + 
AWS_S3_REQUEST_FLAG_PART_SIZE_REQUEST_BODY = 0x00000008, +}; + +/** + * Information sent in the telemetry_callback after each aws_s3_request finished/retried from meta request. + */ +struct aws_s3_request_metrics { + struct aws_allocator *allocator; + + struct { + /* The time stamp when the request started by S3 client, which is prepared time by the client. Timestamps + * are from `aws_high_res_clock_get_ticks`. This will always be available. */ + int64_t start_timestamp_ns; + /* The time stamp when the request finished by S3 client succeed or failed or to be retried. Timestamps + * are from `aws_high_res_clock_get_ticks`. This will always be available. */ + int64_t end_timestamp_ns; + /* The time duration for the request from start to finish. end_timestamp_ns - start_timestamp_ns. This will + * always be available. */ + int64_t total_duration_ns; + + /* The time stamp when the request started to be encoded. -1 means data not available. Timestamp + * are from `aws_high_res_clock_get_ticks` */ + int64_t send_start_timestamp_ns; + /* The time stamp when the request finished to be encoded. -1 means data not available. + * Timestamp are from `aws_high_res_clock_get_ticks` */ + int64_t send_end_timestamp_ns; + /* The time duration for the request from start encoding to finish encoding (send_end_timestamp_ns - + * send_start_timestamp_ns). When send_end_timestamp_ns is -1, means data not available. */ + int64_t sending_duration_ns; + + /* The time stamp when the response started to be received from the network channel. -1 means data not + * available. Timestamp are from `aws_high_res_clock_get_ticks` */ + int64_t receive_start_timestamp_ns; + /* The time stamp when the response finished to be received from the network channel. -1 means data not + * available. 
Timestamp are from `aws_high_res_clock_get_ticks` */ + int64_t receive_end_timestamp_ns; + /* The time duration for the request from start receiving to finish receiving (receive_end_timestamp_ns - + * receive_start_timestamp_ns). When receive_end_timestamp_ns is 0, means data not available. */ + int64_t receiving_duration_ns; + + /* The time stamp when the request started to be signed. -1 means data not + * available. Timestamp are from `aws_high_res_clock_get_ticks` */ + int64_t sign_start_timestamp_ns; + /* The time stamp when the response finished to be signed. -1 means data not + * available. Timestamp are from `aws_high_res_clock_get_ticks` */ + int64_t sign_end_timestamp_ns; + /* The time duration for the request from start signing to finish signing (sign_end_timestamp_ns - + * sign_start_timestamp_ns). When sign_end_timestamp_ns is 0, means data not available. */ + int64_t signing_duration_ns; + } time_metrics; + + struct { + /* Response status code for the request */ + int response_status; + /* HTTP Headers of the response received. */ + struct aws_http_headers *response_headers; + /* Path and query of the request. */ + struct aws_string *request_path_query; + /* The host address of the request. */ + struct aws_string *host_address; + /* The the request ID header value. */ + struct aws_string *request_id; + /* S3 operation name for the request (NULL if unknown) */ + struct aws_string *operation_name; + /* The type of request made */ + enum aws_s3_request_type request_type; + } req_resp_info_metrics; + + struct { + /* The IP address of the request connected to */ + struct aws_string *ip_address; + /* The pointer to the connection that request was made from */ + void *connection_id; + /* The aws_thread_id_t to the thread that request ran on */ + aws_thread_id_t thread_id; + /* The stream-id, which is the idex when the stream was activated. */ + uint32_t stream_id; + /* CRT error code when the aws_s3_request finishes. 
*/ + int error_code; + } crt_info_metrics; + + struct aws_ref_count ref_count; }; /* Represents a single request made to S3. */ @@ -29,6 +113,15 @@ struct aws_s3_request { /* Linked list node used for queuing. */ struct aws_linked_list_node node; + /* Linked list node used for tracking the request is active from HTTP level. */ + struct aws_linked_list_node cancellable_http_streams_list_node; + + /* The meta request lock must be held to access the data */ + struct { + /* The underlying http stream, only valid when the request is active from HTTP level */ + struct aws_http_stream *cancellable_http_stream; + } synced_data; + /* TODO Ref count on the request is no longer needed--only one part of code should ever be holding onto a request, * and we can just transfer ownership.*/ struct aws_ref_count ref_count; @@ -42,6 +135,8 @@ struct aws_s3_request { * retried.*/ struct aws_byte_buf request_body; + struct aws_s3_buffer_pool_ticket *ticket; + /* Beginning range of this part. */ /* TODO currently only used by auto_range_get, could be hooked up to auto_range_put as well. */ uint64_t part_range_start; @@ -56,6 +151,9 @@ struct aws_s3_request { */ uint32_t part_number; + /* The upload_timeout used. Zero, if the request is not a upload part */ + size_t upload_timeout_ms; + /* Number of times aws_s3_meta_request_prepare has been called for a request. During the first call to the virtual * prepare function, this will be 0.*/ uint32_t num_times_prepared; @@ -75,12 +173,21 @@ struct aws_s3_request { bool checksum_match; /* Tag that defines what the built request will actually consist of. This is meant to be space for an enum defined - * by the derived type. Request tags do not necessarily map 1:1 with actual S3 API requests. For example, they can + * by the derived type. Request tags do not necessarily map 1:1 with actual S3 API requests. (For example, they can * be more contextual, like "first part" instead of just "part".) 
*/ - /* TODO: this should be a union type to make it clear that this could be one of two enums for puts, and gets. */ + /* TODO: Eliminate the concept of "request tag" and just use request_type. + * It's confusing having 2 concepts that are so similar. + * There's only 1 case where 2 tags used the same type, + * we can use some other bool/flag to differentiate this 1 case. */ int request_tag; + /* Actual S3 type for the single request (may be AWS_S3_REQUEST_TYPE_UNKNOWN) */ + enum aws_s3_request_type request_type; + + /* S3 operation name for the single request (e.g. "CompleteMultipartUpload") (NULL if unknown) */ + struct aws_string *operation_name; + /* Members of this structure will be repopulated each time the request is sent. If the request fails, and needs to * be retried, then the members of this structure will be cleaned up and re-populated on the next send. */ @@ -103,13 +210,18 @@ struct aws_s3_request { /* Returned response status of this request. */ int response_status; + /* The metrics for the request telemetry */ + struct aws_s3_request_metrics *metrics; } send_data; /* When true, response headers from the request will be stored in the request's response_headers variable. */ uint32_t record_response_headers : 1; /* When true, the response body buffer will be allocated in the size of a part. */ - uint32_t part_size_response_body : 1; + uint32_t has_part_size_response_body : 1; + + /* When true, the request body buffer will be allocated in the size of a part. */ + uint32_t has_part_size_request_body : 1; /* When true, this request is being tracked by the client for limiting the amount of in-flight-requests/stats. */ uint32_t tracked_by_client : 1; @@ -120,6 +232,19 @@ struct aws_s3_request { /* When true, this request is intended to find out the object size. This is currently only used by auto_range_get. 
*/ uint32_t discovers_object_size : 1; + + /* When true, this request does not represent a useful http request and + * must not be sent, however client must still call corresponding finished + * callback for the request. Those requests can occur when request is + * optimistically created during update, but cannot be prepared. ex. when + * put has no content length, requests will be scheduled as regular to + * ensure fair distribution against other requests, but can also result in + * requests for uploading data after the end of the stream (those requests + * will use below flag to indicate that they should not be sent). */ + uint32_t is_noop : 1; + + /* When true, this request has already been uploaded. we still prepare the request to check the durability. */ + uint32_t was_previously_uploaded : 1; }; AWS_EXTERN_C_BEGIN @@ -129,6 +254,7 @@ AWS_S3_API struct aws_s3_request *aws_s3_request_new( struct aws_s3_meta_request *meta_request, int request_tag, + enum aws_s3_request_type request_type, uint32_t part_number, uint32_t flags); @@ -142,10 +268,16 @@ AWS_S3_API void aws_s3_request_clean_up_send_data(struct aws_s3_request *request); AWS_S3_API -void aws_s3_request_acquire(struct aws_s3_request *request); +struct aws_s3_request *aws_s3_request_acquire(struct aws_s3_request *request); + +AWS_S3_API +struct aws_s3_request *aws_s3_request_release(struct aws_s3_request *request); AWS_S3_API -void aws_s3_request_release(struct aws_s3_request *request); +struct aws_s3_request_metrics *aws_s3_request_metrics_new( + struct aws_allocator *allocator, + const struct aws_s3_request *request, + const struct aws_http_message *message); AWS_EXTERN_C_END diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request_messages.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request_messages.h index 5903ed75ef8..f0096754cb7 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request_messages.h +++ 
b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request_messages.h @@ -37,13 +37,6 @@ struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_filter_he size_t excluded_headers_size, bool exclude_x_amz_meta); -/* Copy message and retain all headers, but replace body with one that reads directly from a filepath. */ -AWS_S3_API -struct aws_http_message *aws_s3_message_util_copy_http_message_filepath_body_all_headers( - struct aws_allocator *allocator, - struct aws_http_message *message, - struct aws_byte_cursor filepath); - /* Copy headers from one message to the other and exclude specific headers. * exclude_x_amz_meta controls whether S3 user metadata headers (prefixed with "x-amz-meta) are excluded.*/ AWS_S3_API @@ -118,7 +111,7 @@ struct aws_http_message *aws_s3_upload_part_copy_message_new( bool should_compute_content_md5); /* Create an HTTP request for an S3 Complete-Multipart-Upload request. Creates the necessary XML payload using the - * passed in array list of ETags. (Each ETag is assumed to be an aws_string*) Buffer passed in will be used to store + * passed in array list of `struct aws_s3_mpu_part_info *`. Buffer passed in will be used to store * said XML payload, which will be used as the body. 
*/ AWS_S3_API struct aws_http_message *aws_s3_complete_multipart_message_new( @@ -126,8 +119,7 @@ struct aws_http_message *aws_s3_complete_multipart_message_new( struct aws_http_message *base_message, struct aws_byte_buf *body_buffer, const struct aws_string *upload_id, - const struct aws_array_list *etags, - struct aws_byte_buf *checksums, + const struct aws_array_list *parts, enum aws_s3_checksum_algorithm algorithm); AWS_S3_API diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_util.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_util.h index 5fe22ff7409..d9c06b8a033 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_util.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_util.h @@ -24,20 +24,18 @@ #endif #define KB_TO_BYTES(kb) ((kb)*1024) #define MB_TO_BYTES(mb) ((mb)*1024 * 1024) +#define GB_TO_BYTES(gb) ((gb)*1024 * 1024 * 1024ULL) + +#define MS_TO_NS(ms) ((uint64_t)(ms)*1000000) +#define SEC_TO_NS(ms) ((uint64_t)(ms)*1000000000) struct aws_allocator; struct aws_http_stream; struct aws_http_headers; struct aws_http_message; -struct aws_event_loop; - -enum aws_s3_response_status { - AWS_S3_RESPONSE_STATUS_SUCCESS = 200, - AWS_S3_RESPONSE_STATUS_NO_CONTENT_SUCCESS = 204, - AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS = 206, - AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR = 500, - AWS_S3_RESPONSE_STATUS_SLOW_DOWN = 503, -}; +struct aws_s3_client; +struct aws_s3_request; +struct aws_s3_meta_request; struct aws_cached_signing_config_aws { struct aws_allocator *allocator; @@ -111,6 +109,12 @@ AWS_S3_API extern const struct aws_byte_cursor g_user_agent_header_product_name; AWS_S3_API +extern const struct aws_byte_cursor g_user_agent_header_platform; + +AWS_S3_API +extern const struct aws_byte_cursor g_user_agent_header_unknown; + +AWS_S3_API extern const struct aws_byte_cursor g_acl_header_name; AWS_S3_API @@ -141,10 +145,15 @@ AWS_S3_API extern const struct aws_byte_cursor g_s3_service_name; AWS_S3_API +extern 
const struct aws_byte_cursor g_s3express_service_name; + +AWS_S3_API extern const struct aws_byte_cursor g_range_header_name; extern const struct aws_byte_cursor g_if_match_header_name; +extern const struct aws_byte_cursor g_request_id_header_name; + AWS_S3_API extern const struct aws_byte_cursor g_content_range_header_name; @@ -152,6 +161,9 @@ AWS_S3_API extern const struct aws_byte_cursor g_accept_ranges_header_name; AWS_S3_API +extern const struct aws_byte_cursor g_mp_parts_count_header_name; + +AWS_S3_API extern const struct aws_byte_cursor g_post_method; AWS_S3_API @@ -160,17 +172,18 @@ extern const struct aws_byte_cursor g_head_method; AWS_S3_API extern const struct aws_byte_cursor g_delete_method; -extern const struct aws_byte_cursor g_error_body_xml_name; - -extern const struct aws_byte_cursor g_code_body_xml_name; - -extern const struct aws_byte_cursor g_s3_internal_error_code; - AWS_S3_API extern const uint32_t g_s3_max_num_upload_parts; +/** + * Cache and initial the signing config based on the client. + * + * @param client + * @param signing_config + * @return struct aws_cached_signing_config_aws* + */ struct aws_cached_signing_config_aws *aws_cached_signing_config_new( - struct aws_allocator *allocator, + struct aws_s3_client *client, const struct aws_signing_config_aws *signing_config); void aws_cached_signing_config_destroy(struct aws_cached_signing_config_aws *cached_signing_config); @@ -179,25 +192,35 @@ void aws_cached_signing_config_destroy(struct aws_cached_signing_config_aws *cac AWS_S3_API void copy_http_headers(const struct aws_http_headers *src, struct aws_http_headers *dest); -/* Get a top-level (exists directly under the root tag) tag value. */ -AWS_S3_API -struct aws_string *aws_xml_get_top_level_tag( - struct aws_allocator *allocator, - const struct aws_byte_cursor *tag_name, - struct aws_byte_cursor *xml_body); - -/* Get a top-level (exists directly under the root tag) tag value with expected root name. 
*/ +/** + * Get content of XML element at path. + * + * path_name_array must be a C array of char*, with a NULL as its final entry. + * + * For example: + * Given `xml_doc`: "<Error><Code>SlowDown</Code></Error>" + * And `path_name_array`: {"Error", "Code", NULL} + * `out_body` will get set to: "SlowDown" + * + * Returns AWS_OP_SUCCESS or AWS_OP_ERR. + * Raises AWS_ERROR_STRING_MATCH_NOT_FOUND if path not found in XML, + * or AWS_ERROR_INVALID_XML if the XML can't be parsed. + * + * DO NOT make this function public without a lot of thought. + * The whole thing of passing a C-array of C-strings with a NULL sentinel + * is unconventional for this codebase. + */ AWS_S3_API -struct aws_string *aws_xml_get_top_level_tag_with_root_name( +int aws_xml_get_body_at_path( struct aws_allocator *allocator, - const struct aws_byte_cursor *tag_name, - const struct aws_byte_cursor *expected_root_name, - bool *out_root_name_mismatch, - struct aws_byte_cursor *xml_body); + struct aws_byte_cursor xml_doc, + const char *path_name_array[], + struct aws_byte_cursor *out_body); -/* replace " with escaped /" */ +/* replace " with escaped /" + * Returns initialized aws_byte_buf */ AWS_S3_API -void replace_quote_entities(struct aws_allocator *allocator, struct aws_string *str, struct aws_byte_buf *out_buf); +struct aws_byte_buf aws_replace_quote_entities(struct aws_allocator *allocator, struct aws_byte_cursor src); /* strip quotes if string is enclosed in quotes. does not remove quotes if they only appear on either side of the string */ @@ -228,27 +251,60 @@ int aws_s3_parse_content_length_response_header( struct aws_http_headers *response_headers, uint64_t *out_content_length); -/* Calculate the number of parts based on overall object-range and part_size. This takes into account aligning - * part-ranges on part_size. (ie: if object_range_start is not evenly divisible by part_size, it is considered in the - * middle of a contiguous part, and that first part will be smaller than part_size.) 
*/ +/* + * Given the request headers list, finds the Range header and parses the range-start and range-end. All arguments are + * required. + * */ +AWS_S3_API +int aws_s3_parse_request_range_header( + struct aws_http_headers *request_headers, + bool *out_has_start_range, + bool *out_has_end_range, + uint64_t *out_start_range, + uint64_t *out_end_range); + +/* Calculate the number of parts based on overall object-range and part_size. */ AWS_S3_API -uint32_t aws_s3_get_num_parts(size_t part_size, uint64_t object_range_start, uint64_t object_range_end); +uint32_t aws_s3_calculate_auto_ranged_get_num_parts( + size_t part_size, + uint64_t first_part_size, + uint64_t object_range_start, + uint64_t object_range_end); + +/** + * Calculates the optimal part size and num parts given the 'content_length' and 'client_part_size'. + * This will increase the part size to stay within S3's number of parts. + * If the required part size exceeds the 'client_max_part_size' or + * if the system cannot support the required part size, it will raise an 'AWS_ERROR_INVALID_ARGUMENT' argument. + */ +AWS_S3_API +int aws_s3_calculate_optimal_mpu_part_size_and_num_parts( + uint64_t content_length, + size_t client_part_size, + uint64_t client_max_part_size, + size_t *out_part_size, + uint32_t *out_num_parts); /* Calculates the part range for a part given overall object range, size of each part, and the part's number. Note: part - * numbers begin at one. This takes into account aligning part-ranges on part_size. Intended to be used in conjunction - * with aws_s3_get_num_parts. part_number should be less than or equal to the result of aws_s3_get_num_parts. */ + * numbers begin at one. Intended to be used in conjunction + * with aws_s3_calculate_auto_ranged_get_num_parts. part_number should be less than or equal to the result of + * aws_s3_calculate_auto_ranged_get_num_parts. 
*/ AWS_S3_API -void aws_s3_get_part_range( +void aws_s3_calculate_auto_ranged_get_part_range( uint64_t object_range_start, uint64_t object_range_end, size_t part_size, + uint64_t first_part_size, uint32_t part_number, uint64_t *out_part_range_start, uint64_t *out_part_range_end); /* Match the S3 error code to CRT error code, return AWS_ERROR_UNKNOWN when not matched */ AWS_S3_API -int aws_s3_crt_error_code_from_server_error_code_string(const struct aws_string *error_code_string); +int aws_s3_crt_error_code_from_server_error_code_string(struct aws_byte_cursor error_code_string); + +AWS_S3_API +void aws_s3_request_finish_up_metrics_synced(struct aws_s3_request *request, struct aws_s3_meta_request *meta_request); AWS_EXTERN_C_END diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3express_credentials_provider_impl.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3express_credentials_provider_impl.h new file mode 100644 index 00000000000..31f1ef76d09 --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3express_credentials_provider_impl.h @@ -0,0 +1,118 @@ +#ifndef AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H +#define AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H + +#include <aws/common/hash_table.h> +#include <aws/common/mutex.h> +#include <aws/common/ref_count.h> +#include <aws/s3/s3_client.h> +#include <aws/s3/s3express_credentials_provider.h> + +struct aws_cache; + +/** + * Everything in the session should ONLY be accessed with lock HELD + */ +struct aws_s3express_session { + struct aws_allocator *allocator; + /* The hash key for the table storing creator and session. 
*/ + struct aws_string *hash_key; + + /* The s3express credentials cached for the session */ + struct aws_credentials *s3express_credentials; + + /* Pointer to the creator if the session is in process creating */ + struct aws_s3express_session_creator *creator; + + /* The region and host of the session */ + struct aws_string *region; + struct aws_string *host; + bool inactive; + + /* Only used for mock tests */ + struct aws_s3express_credentials_provider_impl *impl; +}; + +struct aws_s3express_credentials_provider_impl { + struct aws_s3_client *client; + + /* Internal Refcount to make sure the provider out lives all the context. */ + struct aws_ref_count internal_ref; + + struct aws_task *bg_refresh_task; + struct aws_event_loop *bg_event_loop; + + const struct aws_credentials *default_original_credentials; + struct aws_credentials_provider *default_original_credentials_provider; + + struct { + /* Protected by the impl lock */ + struct aws_mutex lock; + /** + * Store the session creators in process. + * `struct aws_string *` as Key. `struct aws_s3express_session_creator *` as Value + */ + struct aws_hash_table session_creator_table; + /** + * An LRU cache to store all the sessions. + * `struct aws_string *` as Key. `struct aws_s3express_session *` as Value + **/ + struct aws_cache *cache; + bool destroying; + } synced_data; + + struct { + /* Overrides for testing purpose. 
*/ + + struct aws_uri *endpoint_override; + uint64_t bg_refresh_secs_override; + + bool (*s3express_session_is_valid_override)(struct aws_s3express_session *session, uint64_t now_seconds); + bool (*s3express_session_about_to_expire_override)(struct aws_s3express_session *session, uint64_t now_seconds); + + /* The callback to be invoked before the real meta request finished callback for provider */ + aws_s3_meta_request_finish_fn *meta_request_finished_overhead; + } mock_test; +}; + +/** + * Configuration options for the default S3 Express credentials provider + */ +struct aws_s3express_credentials_provider_default_options { + /** + * The S3 client to fetch credentials. + * Note, the client is not owned by the provider, user should keep the s3 client outlive the provider. */ + struct aws_s3_client *client; + + /* Optional callback for shutdown complete of the provider */ + aws_simple_completion_callback *shutdown_complete_callback; + void *shutdown_user_data; + + struct { + uint64_t bg_refresh_secs_override; + } mock_test; +}; + +AWS_EXTERN_C_BEGIN +/** + * Create the default S3 Express credentials provider. 
+ * + * @param allocator + * @return + */ +AWS_S3_API +struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_new_default( + struct aws_allocator *allocator, + const struct aws_s3express_credentials_provider_default_options *options); + +/** + * Encode the hash key to be [host_value][hash_of_credentials] + * hash_of_credentials is the sha256 of [access_key][secret_access_key] + */ +AWS_S3_API +struct aws_string *aws_encode_s3express_hash_key_new( + struct aws_allocator *allocator, + const struct aws_credentials *original_credentials, + struct aws_byte_cursor host_value); + +AWS_EXTERN_C_END +#endif /* AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H */ diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3.h index 1d942cd49b6..4dc0fe905d6 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3.h @@ -10,6 +10,8 @@ #include <aws/io/logging.h> #include <aws/s3/exports.h> +AWS_PUSH_SANE_WARNING_LEVEL + #define AWS_C_S3_PACKAGE_ID 14 enum aws_s3_errors { @@ -35,6 +37,16 @@ enum aws_s3_errors { AWS_ERROR_S3_RESUME_FAILED, AWS_ERROR_S3_OBJECT_MODIFIED, AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, + AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE, + AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH, + AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED, + AWS_ERROR_S3_FILE_MODIFIED, + AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT, + AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, + AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, + AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE, + AWS_ERROR_S3_REQUEST_HAS_COMPLETED, + AWS_ERROR_S3_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_S3_PACKAGE_ID) }; @@ -48,26 +60,43 @@ enum aws_s3_subject { AWS_LS_S3_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_S3_PACKAGE_ID) }; +struct aws_s3_platform_info; + struct aws_s3_cpu_group_info { /* group index, this usually refers to a particular numa node */ uint16_t cpu_group; /* array of network devices on this 
node */ - const struct aws_byte_cursor *nic_name_array; + struct aws_byte_cursor *nic_name_array; /* length of network devices array */ size_t nic_name_array_length; + size_t cpus_in_group; }; -struct aws_s3_compute_platform_info { +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4626) /* assignment operator was implicitly defined as deleted */ +# pragma warning(disable : 5027) /* move assignment operator was implicitly defined as deleted */ +#endif + +struct aws_s3_platform_info { /* name of the instance-type: example c5n.18xlarge */ - const struct aws_byte_cursor instance_type; - /* max throughput for this instance type */ - uint16_t max_throughput_gbps; + struct aws_byte_cursor instance_type; + /* max throughput for this instance type, in gigabits per second */ + double max_throughput_gbps; /* array of cpu group info. This will always have at least one entry. */ - const struct aws_s3_cpu_group_info *cpu_group_info_array; + struct aws_s3_cpu_group_info *cpu_group_info_array; /* length of cpu group info array */ size_t cpu_group_info_array_length; + + /* The current build of this library specifically knows an optimal configuration for this + * platform */ + bool has_recommended_configuration; }; +#ifdef _MSC_VER +# pragma warning(pop) +#endif + AWS_EXTERN_C_BEGIN /** @@ -78,18 +107,33 @@ AWS_S3_API void aws_s3_library_init(struct aws_allocator *allocator); /** - * Retrieves the pre-configured metadata for an ec2 instance type. If no such pre-configuration exists, returns NULL. + * Shuts down the internal datastructures used by aws-c-s3. */ AWS_S3_API -struct aws_s3_compute_platform_info *aws_s3_get_compute_platform_info_for_instance_type( - const struct aws_byte_cursor instance_type_name); +void aws_s3_library_clean_up(void); -/** - * Shuts down the internal datastructures used by aws-c-s3. 
+/* + * Returns the aws_s3_platform_info for current platform + * NOTE: THIS API IS EXPERIMENTAL AND UNSTABLE */ AWS_S3_API -void aws_s3_library_clean_up(void); +const struct aws_s3_platform_info *aws_s3_get_current_platform_info(void); + +/* + * Returns the ec2 instance_type for current platform if possible + * NOTE: THIS API IS EXPERIMENTAL AND UNSTABLE + */ +AWS_S3_API +struct aws_byte_cursor aws_s3_get_current_platform_ec2_intance_type(bool cached_only); + +/* + * Retrieves a list of EC2 instance types with recommended configuration. + * Returns aws_array_list<aws_byte_cursor>. The caller is responsible for cleaning up the array list. + */ +AWS_S3_API +struct aws_array_list aws_s3_get_platforms_with_recommended_config(void); AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_S3_H */ diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3_client.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3_client.h index 6be3b9d669e..fc7f36dd0d3 100644 --- a/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3_client.h +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3_client.h @@ -7,9 +7,12 @@ */ #include <aws/auth/signing_config.h> +#include <aws/common/ref_count.h> #include <aws/io/retry_strategy.h> #include <aws/s3/s3.h> +AWS_PUSH_SANE_WARNING_LEVEL + struct aws_allocator; struct aws_http_stream; @@ -17,6 +20,7 @@ struct aws_http_message; struct aws_http_headers; struct aws_tls_connection_options; struct aws_input_stream; +struct aws_hash_table; struct aws_s3_client; struct aws_s3_request; @@ -26,6 +30,10 @@ struct aws_s3_meta_request_resume_token; struct aws_uri; struct aws_string; +struct aws_s3_request_metrics; +struct aws_s3express_credentials_provider; +struct aws_credentials_properties_s3express; + /** * A Meta Request represents a group of generated requests that are being done on behalf of the * original request. 
For example, one large GetObject request can be transformed into a series @@ -50,6 +58,16 @@ enum aws_s3_meta_request_type { /** * The PutObject request will be split into MultiPart uploads that are executed in parallel * to improve throughput, when possible. + * Note: put object supports both known and unknown body length. The client + * relies on Content-Length header to determine length of the body. + * Request with unknown content length are always sent using multipart + * upload regardless of final number of parts and do have the following limitations: + * - multipart threshold is ignored and all request are made through mpu, + * even if they only need one part + * - pause/resume is not supported + * - meta request will throw error if checksum header is provider (due to + * general limitation of checksum not being usable if meta request is + * getting split) */ AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, @@ -58,6 +76,12 @@ enum aws_s3_meta_request_type { * using multiple S3 UploadPartCopy requests in parallel, or bypasses * a CopyObject request to S3 if the object size is not large enough for * a multipart upload. + * Note: copy support is still in development and has following limitations: + * - host header must use virtual host addressing style (path style is not + * supported) and both source and dest buckets must have dns compliant name + * - only {bucket}/{key} format is supported for source and passing arn as + * source will not work + * - source bucket is assumed to be in the same region as dest */ AWS_S3_META_REQUEST_TYPE_COPY_OBJECT, @@ -65,11 +89,50 @@ enum aws_s3_meta_request_type { }; /** + * The type of a single S3 HTTP request. Used by metrics. + * A meta-request can make multiple S3 HTTP requests under the hood. 
+ * + * For example, AWS_S3_META_REQUEST_TYPE_PUT_OBJECT for a large file will + * do multipart upload, resulting in 3+ HTTP requests: + * AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, one or more AWS_S3_REQUEST_TYPE_UPLOAD_PART, + * and finally AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD. + * + * aws_s3_request_type_operation_name() returns the S3 operation name + * for types that map (e.g. AWS_S3_REQUEST_TYPE_HEAD_OBJECT -> "HeadObject"), + * or empty string for types that don't map (e.g. AWS_S3_REQUEST_TYPE_UNKNOWN -> ""). + */ +enum aws_s3_request_type { + /* The actual type of the single S3 HTTP request is unknown */ + AWS_S3_REQUEST_TYPE_UNKNOWN, + + /* S3 APIs */ + AWS_S3_REQUEST_TYPE_HEAD_OBJECT, + AWS_S3_REQUEST_TYPE_GET_OBJECT, + AWS_S3_REQUEST_TYPE_LIST_PARTS, + AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, + AWS_S3_REQUEST_TYPE_UPLOAD_PART, + AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD, + AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD, + AWS_S3_REQUEST_TYPE_UPLOAD_PART_COPY, + AWS_S3_REQUEST_TYPE_COPY_OBJECT, + AWS_S3_REQUEST_TYPE_PUT_OBJECT, + + /* Max enum value */ + AWS_S3_REQUEST_TYPE_MAX, + + /** @deprecated Use AWS_S3_REQUEST_TYPE_UNKNOWN if the actual S3 HTTP request type is unknown */ + AWS_S3_REQUEST_TYPE_DEFAULT = AWS_S3_REQUEST_TYPE_UNKNOWN, +}; + +/** * Invoked to provide response headers received during execution of the meta request, both for * success and error HTTP status codes. * * Return AWS_OP_SUCCESS to continue processing the request. - * Return AWS_OP_ERR to indicate failure and cancel the request. + * + * Return aws_raise_error(E) to cancel the request. + * The error you raise will be reflected in `aws_s3_meta_request_result.error_code`. + * If you're not sure which error to raise, use AWS_ERROR_S3_CANCELED. 
*/ typedef int(aws_s3_meta_request_headers_callback_fn)( struct aws_s3_meta_request *meta_request, @@ -93,7 +156,10 @@ typedef int(aws_s3_meta_request_headers_callback_fn)( * No back-pressure is applied and data arrives as fast as possible. * * Return AWS_OP_SUCCESS to continue processing the request. - * Return AWS_OP_ERR to indicate failure and cancel the request. + * + * Return aws_raise_error(E) to cancel the request. + * The error you raise will be reflected in `aws_s3_meta_request_result.error_code`. + * If you're not sure which error to raise, use AWS_ERROR_S3_CANCELED. */ typedef int(aws_s3_meta_request_receive_body_callback_fn)( @@ -131,13 +197,27 @@ struct aws_s3_meta_request_progress { }; /** - * Invoked to report progress of multi-part upload and copy object requests. + * Invoked to report progress of a meta-request. + * For PutObject, progress refers to bytes uploaded. + * For CopyObject, progress refers to bytes copied. + * For GetObject, progress refers to bytes downloaded. + * For anything else, progress refers to response body bytes received. */ typedef void(aws_s3_meta_request_progress_fn)( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_progress *progress, void *user_data); +/** + * Invoked to report the telemetry of the meta request once a single request finishes. + * Note: *metrics is only valid for the duration of the callback. If you need to keep it around, use + * `aws_s3_request_metrics_acquire` + */ +typedef void(aws_s3_meta_request_telemetry_fn)( + struct aws_s3_meta_request *meta_request, + struct aws_s3_request_metrics *metrics, + void *user_data); + typedef void(aws_s3_meta_request_shutdown_fn)(void *user_data); typedef void(aws_s3_client_shutdown_complete_callback_fn)(void *user_data); @@ -168,6 +248,90 @@ enum aws_s3_checksum_location { AWS_SCL_TRAILER, }; +/** + * Info about a single part, for you to review before the upload completes. 
+ */ +struct aws_s3_upload_part_review { + /* Size in bytes of this part */ + uint64_t size; + + /* Checksum string, as sent in the UploadPart request (usually base64-encoded): + * https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html#API_UploadPart_RequestSyntax + * This is empty if no checksum is used. */ + struct aws_byte_cursor checksum; +}; + +/** + * Info for you to review before an upload completes. + * + * WARNING: This feature is experimental/unstable. + * At this time, review is only available for multipart upload + * (when Content-Length is above the `multipart_upload_threshold`, + * or Content-Length not specified). + */ +struct aws_s3_upload_review { + /* The checksum algorithm used. */ + enum aws_s3_checksum_algorithm checksum_algorithm; + + /* Number of parts uploaded. */ + size_t part_count; + + /* Array of info about each part uploaded (array is `part_count` in length) */ + struct aws_s3_upload_part_review *part_array; +}; + +/** + * Optional callback, for you to review an upload before it completes. + * For example, you can review each part's checksum and fail the upload if + * you do not agree with them. + * + * @param meta_request pointer to the aws_s3_meta_request of the upload. + * @param info Detailed info about the upload. + * + * Return AWS_OP_SUCCESS to continue processing the request. + * + * Return aws_raise_error(E) to cancel the request. + * The error you raise will be reflected in `aws_s3_meta_request_result.error_code`. + * If you're not sure which error to raise, use AWS_ERROR_S3_CANCELED. + * + * WARNING: This feature is experimental/unstable. + * At this time, the callback is only invoked for multipart upload + * (when Content-Length is above the `multipart_upload_threshold`, + * or Content-Length not specified). 
+ */ +typedef int(aws_s3_meta_request_upload_review_fn)( + struct aws_s3_meta_request *meta_request, + const struct aws_s3_upload_review *review, + void *user_data); + +/** + * The factory function for S3 client to create a S3 Express credentials provider. + * The S3 client will be the only owner of the S3 Express credentials provider. + * + * During S3 client destruction, S3 client will start the destruction of the provider, and wait the + * on_provider_shutdown_callback to be invoked before the S3 client finish destruction. + * + * Note to implement the factory properly: + * - Make sure `on_provider_shutdown_callback` will be invoked after the provider finish shutdown, otherwise, + * leak will happen. + * - The provider must not acquire a reference to the client; otherwise, a circular reference will cause a deadlock. + * - The `client` provided CANNOT be used within the factory function call or the destructor. + * + * @param allocator memory allocator to create the provider. + * @param client The S3 client uses and owns the provider. + * @param on_provider_shutdown_callback The callback to be invoked when the provider finishes shutdown. + * @param shutdown_user_data The user data to invoke shutdown callback with + * @param user_data The user data with the factory + * + * @return The aws_s3express_credentials_provider. + */ +typedef struct aws_s3express_credentials_provider *(aws_s3express_provider_factory_fn)( + struct aws_allocator *allocator, + struct aws_s3_client *client, + aws_simple_completion_callback on_provider_shutdown_callback, + void *shutdown_user_data, + void *factory_user_data); + /* Keepalive properties are TCP only. * If interval or timeout are zero, then default values are used. */ @@ -188,7 +352,7 @@ struct aws_s3_client_config { * throughput_target_gbps. (Recommended) */ uint32_t max_active_connections_override; - /* Region that the S3 bucket lives in. */ + /* Region that the client default to. 
*/ struct aws_byte_cursor region; /* Client bootstrap used for common staples such as event loop group, host resolver, etc.. s*/ @@ -207,18 +371,59 @@ struct aws_s3_client_config { * is ENABLED, this is required. Otherwise, this is optional. */ struct aws_tls_connection_options *tls_connection_options; - /* Signing options to be used for each request. Specify NULL to not sign requests. */ + /** + * Required. + * Configure the signing for the requests made from the client. + * - Credentials or credentials provider is required. Other configs are all optional, and will be default to what + * needs to sign the request for S3, only overrides when Non-zero/Not-empty is set. + * - To skip signing, you can config it with anonymous credentials. + * - S3 Client will derive the right config for signing process based on this. + * + * Notes: + * - For AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, S3 client will use the credentials in the config to derive the + * S3 Express credentials that are used in the signing process. + * - For other auth algorithm, client may make modifications to signing config before passing it on to signer. + * + * TODO: deprecate this structure from auth, introduce a new S3 specific one. + */ struct aws_signing_config_aws *signing_config; - /* Size of parts the files will be downloaded or uploaded in. */ - size_t part_size; + /** + * Optional. + * Size of parts the object will be downloaded or uploaded in, in bytes. + * This only affects AWS_S3_META_REQUEST_TYPE_GET_OBJECT and AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. + * If not set, this defaults to 8 MiB. + * The client will adjust the part size for AWS_S3_META_REQUEST_TYPE_PUT_OBJECT if needed for service limits (max + * number of parts per upload is 10,000, minimum upload part size is 5 MiB). + * + * You can also set this per meta-request, via `aws_s3_meta_request_options.part_size`. 
+ */ + uint64_t part_size; - /* If the part size needs to be adjusted for service limits, this is the maximum size it will be adjusted to.. */ - size_t max_part_size; + /* If the part size needs to be adjusted for service limits, this is the maximum size it will be adjusted to. On 32 + * bit machine, it will be forced to SIZE_MAX, which is around 4GiB. The server limit is 5GiB, but object size limit + * is 5TiB for now. We should be good enough for all the cases. */ + uint64_t max_part_size; - /* Throughput target in Gbps that we are trying to reach. */ + /** + * Optional. + * The size threshold in bytes for when to use multipart uploads. + * Uploads larger than this will use the multipart upload strategy. + * Uploads smaller or equal to this will use a single HTTP request. + * This only affects AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. + * If set, this should be at least `part_size`. + * If not set, maximal of `part_size` and 5 MiB will be used. + * + * You can also set this per meta-request, via `aws_s3_meta_request_options.multipart_upload_threshold`. + */ + uint64_t multipart_upload_threshold; + + /* Throughput target in gigabits per second (Gbps) that we are trying to reach. */ double throughput_target_gbps; + /* How much memory can we use. This will be capped to SIZE_MAX */ + uint64_t memory_limit_in_bytes; + /* Retry strategy to use. If NULL, a default retry strategy will be used. */ struct aws_retry_strategy *retry_strategy; @@ -292,6 +497,22 @@ struct aws_s3_client_config { * Ignored unless `enable_read_backpressure` is true. */ size_t initial_read_window; + + /** + * To enable S3 Express support or not. + */ + bool enable_s3express; + + /** + * Optional. + * Only used when `enable_s3express` is set. + * + * If set, client will invoke the factory to get the provider to use, when needed. + * + * If not set, client will create a default S3 Express provider under the hood. 
+ */ + aws_s3express_provider_factory_fn *s3express_provider_override_factory; + void *factory_user_data; }; struct aws_s3_checksum_config { @@ -316,7 +537,7 @@ struct aws_s3_checksum_config { enum aws_s3_checksum_algorithm checksum_algorithm; /** - * Enable checksum mode header will be attached to get requests, this will tell s3 to send back checksums headers if + * Enable checksum mode header will be attached to GET requests, this will tell s3 to send back checksums headers if * they exist. Calculate the corresponding checksum on the response bodies. The meta request will finish with a did * validate field and set the error code to AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH if the calculated * checksum, and checksum found in the response header do not match. @@ -339,35 +560,114 @@ struct aws_s3_checksum_config { struct aws_array_list *validate_checksum_algorithms; }; -/* Options for a new meta request, ie, file transfer that will be handled by the high performance client. */ +/** + * Options for a new meta request, ie, file transfer that will be handled by the high performance client. + * + * There are several ways to pass the request's body data: + * 1) If the data is already in memory, set the body-stream on `message`. + * 2) If the data is on disk, set `send_filepath` for best performance. + * 3) If the data is available, but copying each chunk is asynchronous, set `send_async_stream`. + * 4) If you're not sure when each chunk of data will be available, use `send_using_async_writes`. + */ struct aws_s3_meta_request_options { - /* TODO: The meta request options cannot control the request to be split or not. Should consider to add one */ - /* The type of meta request we will be trying to accelerate. */ enum aws_s3_meta_request_type type; - /* Signing options to be used for each request created for this meta request. If NULL, options in the client will - * be used. If not NULL, these options will override the client options. */ + /** + * Optional. 
+ * The S3 operation name (e.g. "CreateBucket"). + * This will only be used when type is AWS_S3_META_REQUEST_TYPE_DEFAULT; + * it is automatically populated for other meta-request types. + * This name is used to fill out details in metrics and error reports. + */ + struct aws_byte_cursor operation_name; + + /** + * Configure the signing for each request created for this meta request. If NULL, options in the client will be + * used. + * - The credentials will be obtained based on the precedence of: + * 1. `credentials` from `signing_config` in `aws_s3_meta_request_options` + * 2. `credentials_provider` from `signing_config` in `aws_s3_meta_request_options` + * 3. `credentials` from `signing_config` cached in the client + * 4. `credentials_provider` cached in the client + * - To skip signing, you can config it with anonymous credentials. + * - S3 Client will derive the right config for signing process based on this. + * + * Notes: + * - For AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, S3 client will use the credentials in the config to derive the + * S3 Express credentials that are used in the signing process. + * - For other auth algorithm, client may make modifications to signing config before passing it on to signer. + **/ const struct aws_signing_config_aws *signing_config; /* Initial HTTP message that defines what operation we are doing. - * When uploading a file, you should set `send_filepath` (instead of the message's body-stream) - * for better performance. */ + * Do not set the message's body-stream if the body is being passed by other means (see note above) */ struct aws_http_message *message; /** * Optional. - * If set, this file is sent as the request body, and the `message` body-stream is ignored. - * This can give better performance than sending data using the body-stream. + * If set, this file is sent as the request body. + * This gives the best performance when sending data from a file. 
+ * Do not set if the body is being passed by other means (see note above). */ struct aws_byte_cursor send_filepath; /** + * Optional - EXPERIMENTAL/UNSTABLE + * If set, the request body comes from this async stream. + * Use this when outgoing data will be produced in asynchronous chunks. + * The S3 client will read from the stream whenever it's ready to upload another chunk. + * + * WARNING: The S3 client can deadlock if many async streams are "stalled", + * never completing their async read. If you're not sure when (if ever) + * data will be ready, use `send_using_async_writes` instead. + * + * Do not set if the body is being passed by other means (see note above). + */ + struct aws_async_input_stream *send_async_stream; + + /** + * Optional - EXPERIMENTAL/UNSTABLE + * Set this to send request body data using the async aws_s3_meta_request_write() function. + * Use this when outgoing data will be produced in asynchronous chunks, + * and you're not sure when (if ever) each chunk will be ready. + * + * This only works with AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. + * + * Do not set if the body is being passed by other means (see note above). + */ + bool send_using_async_writes; + + /** * Optional. * if set, the flexible checksum will be performed by client based on the config. */ const struct aws_s3_checksum_config *checksum_config; + /** + * Optional. + * Size of parts the object will be downloaded or uploaded in, in bytes. + * This only affects AWS_S3_META_REQUEST_TYPE_GET_OBJECT and AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. + * If not set, the value from `aws_s3_client_config.part_size` is used, which defaults to 8MiB. + * + * The client will adjust the part size for AWS_S3_META_REQUEST_TYPE_PUT_OBJECT if needed for service limits (max + * number of parts per upload is 10,000, minimum upload part size is 5 MiB). + */ + uint64_t part_size; + + /** + * Optional. + * The size threshold in bytes for when to use multipart uploads. 
+ * Uploads larger than this will use the multipart upload strategy. + * Uploads smaller or equal to this will use a single HTTP request. + * This only affects AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. + * If set, this should be at least `part_size`. + * If not set, `part_size` adjusted by client will be used as the threshold. + * If both `part_size` and `multipart_upload_threshold` are not set, + * the values from `aws_s3_client_config` are used. + */ + uint64_t multipart_upload_threshold; + /* User data for all callbacks. */ void *user_data; @@ -398,14 +698,29 @@ struct aws_s3_meta_request_options { /** * Invoked to report progress of the meta request execution. - * Currently, the progress callback is invoked only for the CopyObject meta request type. - * TODO: support this callback for all the types of meta requests - * See `aws_s3_meta_request_progress_fn` + * See `aws_s3_meta_request_progress_fn`. */ aws_s3_meta_request_progress_fn *progress_callback; /** * Optional. + * To get telemetry metrics when a single request finishes. + * If set the request will keep track of the metrics from `aws_s3_request_metrics`, and fire the callback when the + * request finishes receiving response. + * See `aws_s3_meta_request_telemetry_fn` + */ + aws_s3_meta_request_telemetry_fn *telemetry_callback; + + /** + * Optional. + * Callback for reviewing an upload before it completes. + * WARNING: experimental/unstable + * See `aws_s3_upload_review_fn` + */ + aws_s3_meta_request_upload_review_fn *upload_review_callback; + + /** + * Optional. * Endpoint override for request. Can be used to override scheme and port of * the endpoint. * There is some overlap between Host header and Endpoint and corner cases @@ -427,6 +742,15 @@ struct aws_s3_meta_request_options { * from the buffer and compare them them to previously uploaded part checksums. */ struct aws_s3_meta_request_resume_token *resume_token; + + /* + * Optional. + * Total object size hint, in bytes. 
+ * The optimal strategy for downloading a file depends on its size. + * Set this hint to help the S3 client choose the best strategy for this particular file. + * This is just used as an estimate, so it's okay to provide an approximate value if the exact size is unknown. + */ + uint64_t *object_size_hint; }; /* Result details of a meta request. @@ -441,12 +765,23 @@ struct aws_s3_meta_request_options { */ struct aws_s3_meta_request_result { - /* HTTP Headers for the failed request that triggered finish of the meta request. NULL if no request failed. */ + /* If meta request failed due to an HTTP error response from S3, these are the headers. + * NULL if meta request failed for another reason. */ struct aws_http_headers *error_response_headers; - /* Response body for the failed request that triggered finishing of the meta request. NUll if no request failed.*/ + /* If meta request failed due to an HTTP error response from S3, this the body. + * NULL if meta request failed for another reason, or if the response had no body (such as a HEAD response). */ struct aws_byte_buf *error_response_body; + /* If meta request failed due to an HTTP error response from S3, + * this is the name of the S3 operation it was responding to. + * For example, if a AWS_S3_META_REQUEST_TYPE_PUT_OBJECT fails this could be + * "PutObject, "CreateMultipartUpload", "UploadPart", "CompleteMultipartUpload", or others. + * For AWS_S3_META_REQUEST_TYPE_DEFAULT, this is the same value passed to + * aws_s3_meta_request_options.operation_name. + * NULL if the meta request failed for another reason, or the operation name is not known. */ + struct aws_string *error_response_operation_name; + /* Response status of the failed request or of the entire meta request. */ int response_status; @@ -456,7 +791,7 @@ struct aws_s3_meta_request_result { * uploaded as a multipart object. 
* * If the object to get is multipart object, the part checksum MAY be validated if the part size to get matches the - * part size uploaded. In that case, if any part mismatch the checksum received, the meta request will failed with + * part size uploaded. In that case, if any part mismatch the checksum received, the meta request will fail with * checksum mismatch. However, even if the parts checksum were validated, this will NOT be set to true, as the * checksum for the whole meta request was NOT validated. **/ @@ -500,6 +835,50 @@ struct aws_s3_meta_request *aws_s3_client_make_meta_request( const struct aws_s3_meta_request_options *options); /** + * Write the next chunk of data. + * + * You must set `aws_s3_meta_request_options.send_using_async_writes` to use this function. + * + * This function is asynchronous, and returns a future (see <aws/io/future.h>). + * You may not call write() again until the future completes. + * + * If the future completes with an error code, then write() did not succeed + * and you should not call it again. If the future contains any error code, + * the meta request is guaranteed to finish soon (you don't need to worry about + * canceling the meta request yourself after a failed write). + * A common error code is AWS_ERROR_S3_REQUEST_HAS_COMPLETED, indicating + * the meta request completed for reasons unrelated to the write() call + * (e.g. CreateMultipartUpload received a 403 Forbidden response). + * AWS_ERROR_INVALID_STATE usually indicates that you're calling write() + * incorrectly (e.g. not waiting for previous write to complete). + * + * You MUST keep the data in memory until the future completes. + * If you need to free the memory early, call aws_s3_meta_request_cancel(). + * cancel() will synchronously complete the future from any pending write with + * error code AWS_ERROR_S3_REQUEST_HAS_COMPLETED. + * + * You can wait any length of time between calls to write(). 
+ * If there's not enough data to upload a part, the data will be copied + * to a buffer and the future will immediately complete. + * + * @param meta_request Meta request + * + * @param data The data to send. The data can be any size. + * + * @param eof Pass true to signal EOF (end of file). + * Do not call write() again after passing true. + * + * This function never returns NULL. + * + * WARNING: This feature is experimental. + */ +AWS_S3_API +struct aws_future_void *aws_s3_meta_request_write( + struct aws_s3_meta_request *meta_request, + struct aws_byte_cursor data, + bool eof); + +/** * Increment the flow-control window, so that response data continues downloading. * * If the client was created with `enable_read_backpressure` set true, @@ -563,7 +942,7 @@ int aws_s3_meta_request_pause( */ struct aws_s3_upload_resume_token_options { struct aws_byte_cursor upload_id; /* Required */ - size_t part_size; /* Required */ + uint64_t part_size; /* Required. Must be less than SIZE_MAX */ size_t total_num_parts; /* Required */ /** @@ -612,7 +991,7 @@ enum aws_s3_meta_request_type aws_s3_meta_request_resume_token_type( * Part size associated with operation. */ AWS_S3_API -size_t aws_s3_meta_request_resume_token_part_size(struct aws_s3_meta_request_resume_token *resume_token); +uint64_t aws_s3_meta_request_resume_token_part_size(struct aws_s3_meta_request_resume_token *resume_token); /* * Total num parts associated with operation. @@ -653,12 +1032,200 @@ struct aws_s3_meta_request *aws_s3_meta_request_acquire(struct aws_s3_meta_reque AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_release(struct aws_s3_meta_request *meta_request); +/** + * Initialize the configuration for a default S3 signing. 
+ */ AWS_S3_API void aws_s3_init_default_signing_config( struct aws_signing_config_aws *signing_config, const struct aws_byte_cursor region, struct aws_credentials_provider *credentials_provider); +/** + * Return operation name for aws_s3_request_type, + * or empty string if the type doesn't map to an actual operation. + * For example: + * AWS_S3_REQUEST_TYPE_HEAD_OBJECT -> "HeadObject" + * AWS_S3_REQUEST_TYPE_UNKNOWN -> "" + * AWS_S3_REQUEST_TYPE_MAX -> "" + */ +AWS_S3_API +const char *aws_s3_request_type_operation_name(enum aws_s3_request_type type); + +/** + * Add a reference, keeping this object alive. + * The reference must be released when you are done with it, or it's memory will never be cleaned up. + * Always returns the same pointer that was passed in. + */ +AWS_S3_API +struct aws_s3_request_metrics *aws_s3_request_metrics_acquire(struct aws_s3_request_metrics *metrics); + +/** + * Release a reference. + * When the reference count drops to 0, this object will be cleaned up. + * It's OK to pass in NULL (nothing happens). + * Always returns NULL. + */ +AWS_S3_API +struct aws_s3_request_metrics *aws_s3_request_metrics_release(struct aws_s3_request_metrics *metrics); + +/************************************* Getters for s3 request metrics ************************************************/ +/** + * Get the request ID from aws_s3_request_metrics. + * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. + * If available, out_request_id will be set to a string. Be warned this string's lifetime is tied to the metrics + * object. + **/ +AWS_S3_API +int aws_s3_request_metrics_get_request_id( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **out_request_id); + +/* Get the start time from aws_s3_request_metrics, which is when S3 client prepare the request to be sent. Always + * available. 
Timestamp are from `aws_high_res_clock_get_ticks` */ +AWS_S3_API +void aws_s3_request_metrics_get_start_timestamp_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *out_start_time); + +/* Get the end time from aws_s3_request_metrics. Always available */ +AWS_S3_API +void aws_s3_request_metrics_get_end_timestamp_ns(const struct aws_s3_request_metrics *metrics, uint64_t *out_end_time); + +/* Get the total duration time from aws_s3_request_metrics. Always available */ +AWS_S3_API +void aws_s3_request_metrics_get_total_duration_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *out_total_duration); + +/* Get the time stamp when the request started to be encoded. Timestamps are from `aws_high_res_clock_get_ticks` + * AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if the request ended before it gets sent. */ +AWS_S3_API +int aws_s3_request_metrics_get_send_start_timestamp_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *out_send_start_time); + +/* Get the time stamp when the request finished to be encoded. Timestamps are from `aws_high_res_clock_get_ticks` + * AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. */ +AWS_S3_API +int aws_s3_request_metrics_get_send_end_timestamp_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *out_send_end_time); + +/* The time duration for the request from start encoding to finish encoding (send_end_timestamp_ns - + * send_start_timestamp_ns). + * AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. */ +AWS_S3_API +int aws_s3_request_metrics_get_sending_duration_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *out_sending_duration); + +/* Get the time stamp when the response started to be received from the network channel. Timestamps are from + * `aws_high_res_clock_get_ticks` AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. 
*/ +AWS_S3_API +int aws_s3_request_metrics_get_receive_start_timestamp_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *out_receive_start_time); + +/* Get the time stamp when the response finished to be received from the network channel. Timestamps are from + * `aws_high_res_clock_get_ticks` AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. */ +AWS_S3_API +int aws_s3_request_metrics_get_receive_end_timestamp_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *out_receive_end_time); + +/* The time duration for the request from start receiving to finish receiving (receive_end_timestamp_ns - + * receive_start_timestamp_ns). + * AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. */ +AWS_S3_API +int aws_s3_request_metrics_get_receiving_duration_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *out_receiving_duration); + +/* Get the response status code for the request. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not + * available. */ +AWS_S3_API +int aws_s3_request_metrics_get_response_status_code( + const struct aws_s3_request_metrics *metrics, + int *out_response_status); + +/* Get the HTTP Headers of the response received for the request. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised + * if data not available. */ +AWS_S3_API +int aws_s3_request_metrics_get_response_headers( + const struct aws_s3_request_metrics *metrics, + struct aws_http_headers **out_response_headers); + +/** + * Get the path and query of the request. + * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. + * If available, out_request_path_query will be set to a string. Be warned this string's lifetime is tied to the metrics + * object. + */ +AWS_S3_API +void aws_s3_request_metrics_get_request_path_query( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **out_request_path_query); + +/** + * Get the host_address of the request. 
+ * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. + * If available, out_host_address will be set to a string. Be warned this string's lifetime is tied to the metrics + * object. + */ +AWS_S3_API +void aws_s3_request_metrics_get_host_address( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **out_host_address); + +/** + * Get the IP address of the request connected to. + * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. + * If available, out_ip_address will be set to a string. Be warned this string's lifetime is tied to the metrics object. + */ +AWS_S3_API +int aws_s3_request_metrics_get_ip_address( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **out_ip_address); + +/* Get the id of connection that request was made from. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data + * not available */ +AWS_S3_API +int aws_s3_request_metrics_get_connection_id(const struct aws_s3_request_metrics *metrics, size_t *out_connection_id); + +/* Get the thread ID of the thread that request was made from. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if + * data not available */ +AWS_S3_API +int aws_s3_request_metrics_get_thread_id(const struct aws_s3_request_metrics *metrics, aws_thread_id_t *out_thread_id); + +/* Get the stream-id, which is the idex when the stream was activated. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be + * raised if data not available */ +AWS_S3_API +int aws_s3_request_metrics_get_request_stream_id(const struct aws_s3_request_metrics *metrics, uint32_t *out_stream_id); + +/** + * Get the S3 operation name of the request (e.g. "HeadObject"). + * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. + * If available, out_operation_name will be set to a string. + * Be warned this string's lifetime is tied to the metrics object. 
+ */ +AWS_S3_API +int aws_s3_request_metrics_get_operation_name( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **out_operation_name); + +/* Get the request type from request metrics. + * If you just need a string, aws_s3_request_metrics_get_operation_name() is more reliable. */ +AWS_S3_API +void aws_s3_request_metrics_get_request_type( + const struct aws_s3_request_metrics *metrics, + enum aws_s3_request_type *out_request_type); + +/* Get the AWS CRT error code from request metrics. */ +AWS_S3_API +int aws_s3_request_metrics_get_error_code(const struct aws_s3_request_metrics *metrics); + AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_S3_CLIENT_H */ diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3express_credentials_provider.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3express_credentials_provider.h new file mode 100644 index 00000000000..316a3e240bb --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3express_credentials_provider.h @@ -0,0 +1,102 @@ +#ifndef AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H +#define AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H + +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include <aws/auth/credentials.h> +#include <aws/common/ref_count.h> +#include <aws/s3/s3.h> + +AWS_PUSH_SANE_WARNING_LEVEL + +struct aws_s3_client; +struct aws_s3express_credentials_provider; + +struct aws_credentials_properties_s3express { + /** + * Required. + * The host address of the s3 bucket for the request. + */ + struct aws_byte_cursor host; + /** + * Optional. + * The region of the bucket. + * If empty, the region of the S3 client will be used. 
+ */ + struct aws_byte_cursor region; +}; + +struct aws_s3express_credentials_provider_vtable { + /** + * Implementation for S3 Express provider to get S3 Express credentials + */ + int (*get_credentials)( + struct aws_s3express_credentials_provider *provider, + const struct aws_credentials *original_credentials, + const struct aws_credentials_properties_s3express *properties, + aws_on_get_credentials_callback_fn callback, + void *user_data); + + /** + * Implementation to destroy the provider. + */ + void (*destroy)(struct aws_s3express_credentials_provider *provider); +}; + +struct aws_s3express_credentials_provider { + struct aws_s3express_credentials_provider_vtable *vtable; + struct aws_allocator *allocator; + /* Optional callback for shutdown complete of the provider */ + aws_simple_completion_callback *shutdown_complete_callback; + void *shutdown_user_data; + void *impl; + struct aws_ref_count ref_count; +}; + +AWS_EXTERN_C_BEGIN + +AWS_S3_API +struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_release( + struct aws_s3express_credentials_provider *provider); + +/** + * To initialize the provider with basic vtable and refcount. And hook up the refcount with vtable functions. + * + * @param provider + * @param allocator + * @param vtable + * @param impl Optional, the impl for the provider + * @return AWS_S3_API + */ +AWS_S3_API +void aws_s3express_credentials_provider_init_base( + struct aws_s3express_credentials_provider *provider, + struct aws_allocator *allocator, + struct aws_s3express_credentials_provider_vtable *vtable, + void *impl); + +/** + * Async function for retrieving specific credentials based on properties. + * + * @param provider aws_s3express_credentials_provider provider to source from + * @param original_credentials The credentials used to derive the credentials for S3 Express. + * @param properties Specific properties for credentials being fetched. 
+ * @param user_data user data to pass to the completion callback + * + * callback will only be invoked if-and-only-if the return value was AWS_OP_SUCCESS. + * + */ +AWS_S3_API int aws_s3express_credentials_provider_get_credentials( + struct aws_s3express_credentials_provider *provider, + const struct aws_credentials *original_credentials, + const struct aws_credentials_properties_s3express *properties, + aws_on_get_credentials_callback_fn callback, + void *user_data); + +AWS_EXTERN_C_END +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H */ diff --git a/contrib/restricted/aws/aws-c-s3/source/s3.c b/contrib/restricted/aws/aws-c-s3/source/s3.c index a3a71166072..2ca1b3d75db 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3.c @@ -3,6 +3,7 @@ * SPDX-License-Identifier: Apache-2.0. */ +#include <aws/s3/private/s3_platform_info.h> #include <aws/s3/s3.h> #include <aws/auth/auth.h> @@ -31,11 +32,21 @@ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH, "response checksum header does not match calculated checksum"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED, "failed to calculate a checksum for the provided stream"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_PAUSED, "Request successfully paused"), - AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED, "Failed to parse result from list parts"), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED, "Failed to parse response from ListParts"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH, "Checksum does not match previously uploaded part"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESUME_FAILED, "Resuming request failed"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_OBJECT_MODIFIED, "The object modifed during download."), - AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, "Async error received from S3 and not 
recoverable from retry.") + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, "Async error received from S3 and not recoverable from retry."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE, "The metric data is not available, the requests ends before the metric happens."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH, "Request body length must match Content-Length header."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED, "RequestTimeTooSkewed error received from S3."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_FILE_MODIFIED, "The file was modified during upload."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT, "Request was not created due to used memory exceeding memory limit."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, "Specified memory configuration is invalid for the system. " + "Memory limit should be at least 1GiB. Part size and max part size should be smaller than memory limit."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, "CreateSession call failed when signing with S3 Express."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE, "part_size mismatch, possibly due to wrong object_size_hint. 
Retrying with Range instead of partNumber."), + AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_REQUEST_HAS_COMPLETED, "Request has already completed, action cannot be performed."), }; /* clang-format on */ @@ -64,34 +75,9 @@ static struct aws_log_subject_info_list s_s3_log_subject_list = { .count = AWS_ARRAY_SIZE(s_s3_log_subject_infos), }; -/**** Configuration info for the c5n.18xlarge *****/ -static struct aws_byte_cursor s_c5n_18xlarge_nic_array[] = {AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0")}; - -static struct aws_s3_cpu_group_info s_c5n_18xlarge_cpu_group_info_array[] = { - { - .cpu_group = 0u, - .nic_name_array = s_c5n_18xlarge_nic_array, - .nic_name_array_length = AWS_ARRAY_SIZE(s_c5n_18xlarge_nic_array), - }, - { - .cpu_group = 1u, - .nic_name_array = NULL, - .nic_name_array_length = 0u, - }, -}; - -static struct aws_s3_compute_platform_info s_c5n_18xlarge_platform_info = { - .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("c5n.18xlarge"), - .max_throughput_gbps = 100u, - .cpu_group_info_array = s_c5n_18xlarge_cpu_group_info_array, - .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_c5n_18xlarge_cpu_group_info_array), -}; -/****** End c5n.18xlarge *****/ - -static struct aws_hash_table s_compute_platform_info_table; - static bool s_library_initialized = false; static struct aws_allocator *s_library_allocator = NULL; +static struct aws_s3_platform_info_loader *s_loader; void aws_s3_library_init(struct aws_allocator *allocator) { if (s_library_initialized) { @@ -109,27 +95,21 @@ void aws_s3_library_init(struct aws_allocator *allocator) { aws_register_error_info(&s_error_list); aws_register_log_subject_info_list(&s_s3_log_subject_list); + s_loader = aws_s3_platform_info_loader_new(allocator); + AWS_FATAL_ASSERT(s_loader); + s_library_initialized = true; +} - AWS_FATAL_ASSERT( - !aws_hash_table_init( - &s_compute_platform_info_table, - allocator, - 32, - aws_hash_byte_cursor_ptr_ignore_case, - (bool (*)(const void *, const void *))aws_byte_cursor_eq_ignore_case, 
- NULL, - NULL) && - "Hash table init failed!"); - - AWS_FATAL_ASSERT( - !aws_hash_table_put( - &s_compute_platform_info_table, - &s_c5n_18xlarge_platform_info.instance_type, - &s_c5n_18xlarge_platform_info, - NULL) && - "hash table put failed!"); +const struct aws_s3_platform_info *aws_s3_get_current_platform_info(void) { + return aws_s3_get_platform_info_for_current_environment(s_loader); +} - s_library_initialized = true; +struct aws_byte_cursor aws_s3_get_current_platform_ec2_intance_type(bool cached_only) { + return aws_s3_get_ec2_instance_type(s_loader, cached_only); +} + +struct aws_array_list aws_s3_get_platforms_with_recommended_config(void) { + return aws_s3_get_recommended_platforms(s_loader); } void aws_s3_library_clean_up(void) { @@ -138,37 +118,12 @@ void aws_s3_library_clean_up(void) { } s_library_initialized = false; + s_loader = aws_s3_platform_info_loader_release(s_loader); aws_thread_join_all_managed(); - aws_hash_table_clean_up(&s_compute_platform_info_table); aws_unregister_log_subject_info_list(&s_s3_log_subject_list); aws_unregister_error_info(&s_error_list); aws_http_library_clean_up(); aws_auth_library_clean_up(); s_library_allocator = NULL; } - -struct aws_s3_compute_platform_info *aws_s3_get_compute_platform_info_for_instance_type( - const struct aws_byte_cursor instance_type_name) { - AWS_LOGF_TRACE( - AWS_LS_S3_GENERAL, - "static: looking up compute platform info for instance type " PRInSTR, - AWS_BYTE_CURSOR_PRI(instance_type_name)); - - struct aws_hash_element *platform_info_element = NULL; - aws_hash_table_find(&s_compute_platform_info_table, &instance_type_name, &platform_info_element); - - if (platform_info_element) { - AWS_LOGF_INFO( - AWS_LS_S3_GENERAL, - "static: found compute platform info for instance type " PRInSTR, - AWS_BYTE_CURSOR_PRI(instance_type_name)); - return platform_info_element->value; - } - - AWS_LOGF_INFO( - AWS_LS_S3_GENERAL, - "static: compute platform info for instance type " PRInSTR " not found", - 
AWS_BYTE_CURSOR_PRI(instance_type_name)); - return NULL; -} diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c b/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c index 75689aaa424..a71d418e387 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c @@ -11,14 +11,13 @@ #include <aws/common/string.h> #include <inttypes.h> -#ifdef _MSC_VER -/* sscanf warning (not currently scanning for strings) */ -# pragma warning(disable : 4996) -#endif - +/* Dont use buffer pool when we know response size, and its below this number, + * i.e. when user provides explicit range that is small, ex. range = 1-100. + * Instead of going through the pool in that case, we just use a dynamic buffer + * for response (pre-mempool behavior). */ +const uint64_t s_min_size_response_for_pooling = 1 * 1024 * 1024; const uint32_t s_conservative_max_requests_in_flight = 8; const struct aws_byte_cursor g_application_xml_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/xml"); -const struct aws_byte_cursor g_object_size_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ActualObjectSize"); static void s_s3_meta_request_auto_ranged_get_destroy(struct aws_s3_meta_request *meta_request); @@ -27,9 +26,7 @@ static bool s_s3_auto_ranged_get_update( uint32_t flags, struct aws_s3_request **out_request); -static int s_s3_auto_ranged_get_prepare_request( - struct aws_s3_meta_request *meta_request, - struct aws_s3_request *request); +static struct aws_future_void *s_s3_auto_ranged_get_prepare_request(struct aws_s3_request *request); static void s_s3_auto_ranged_get_request_finished( struct aws_s3_meta_request *meta_request, @@ -54,10 +51,10 @@ static int s_s3_auto_ranged_get_success_status(struct aws_s3_meta_request *meta_ AWS_PRECONDITION(auto_ranged_get); if (auto_ranged_get->initial_message_has_range_header) { - return AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS; + return 
AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT; } - return AWS_S3_RESPONSE_STATUS_SUCCESS; + return AWS_HTTP_STATUS_CODE_200_OK; } /* Allocate a new auto-ranged-get meta request. */ @@ -96,13 +93,36 @@ struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_get_new( struct aws_http_headers *headers = aws_http_message_get_headers(auto_ranged_get->base.initial_request_message); AWS_ASSERT(headers != NULL); - auto_ranged_get->initial_message_has_range_header = aws_http_headers_has(headers, g_range_header_name); + if (aws_http_headers_has(headers, g_range_header_name)) { + auto_ranged_get->initial_message_has_range_header = true; + if (aws_s3_parse_request_range_header( + headers, + &auto_ranged_get->initial_message_has_start_range, + &auto_ranged_get->initial_message_has_end_range, + &auto_ranged_get->initial_range_start, + &auto_ranged_get->initial_range_end)) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Could not parse Range header for Auto-Ranged-Get Meta Request.", + (void *)auto_ranged_get); + goto on_error; + } + } auto_ranged_get->initial_message_has_if_match_header = aws_http_headers_has(headers, g_if_match_header_name); - + auto_ranged_get->synced_data.first_part_size = auto_ranged_get->base.part_size; + if (options->object_size_hint != NULL) { + auto_ranged_get->object_size_hint_available = true; + auto_ranged_get->object_size_hint = *options->object_size_hint; + } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Created new Auto-Ranged Get Meta Request.", (void *)&auto_ranged_get->base); return &auto_ranged_get->base; + +on_error: + /* This will also clean up the auto_ranged_get */ + aws_s3_meta_request_release(&(auto_ranged_get->base)); + return NULL; } static void s_s3_meta_request_auto_ranged_get_destroy(struct aws_s3_meta_request *meta_request) { @@ -114,6 +134,56 @@ static void s_s3_meta_request_auto_ranged_get_destroy(struct aws_s3_meta_request aws_mem_release(meta_request->allocator, auto_ranged_get); } +/* + * This function returns the type of 
first request which we will also use to discover overall object size. + */ +static enum aws_s3_auto_ranged_get_request_type s_s3_get_request_type_for_discovering_object_size( + const struct aws_s3_meta_request *meta_request) { + AWS_PRECONDITION(meta_request); + struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; + AWS_ASSERT(auto_ranged_get); + + /* + * When we attempt to download an empty file using the `AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE` + * request type, the request fails with an empty file error. We then reset `object_range_known` + * (`object_range_empty` is set to true) and try to download the file again with + * `AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1`. We send another request, even though there is + * no body, to provide successful response headers to the user. If the file is still empty, successful response + * headers will be provided to the users. Otherwise, the newer version of the file will be downloaded. + */ + if (auto_ranged_get->synced_data.object_range_empty != 0) { + auto_ranged_get->synced_data.object_range_empty = 0; + return AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1; + } + + /* + * If a range header exists but has no start-range (i.e. Range: bytes=-100), we perform a HeadRequest. If the + * start-range is unknown, we could potentially execute a request from the end-range and keep that request around + * until the meta request finishes. However, this approach involves the complexity of managing backpressure. For + * simplicity, we execute a HeadRequest if the start-range is not specified. + */ + if (auto_ranged_get->initial_message_has_range_header != 0) { + return auto_ranged_get->initial_message_has_start_range + ? AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE + : AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT; + } + + /* If we don't need checksum validation, then discover the size of the object while trying to get the first part. 
*/ + if (!meta_request->checksum_config.validate_response_checksum) { + return AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE; + } + + /* If the object_size_hint indicates that it is a small one part file, then try to get the file directly + * TODO: Bypass memory limiter so that we don't overallocate memory for small files + */ + if (auto_ranged_get->object_size_hint_available && auto_ranged_get->object_size_hint <= meta_request->part_size) { + return AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1; + } + + /* Otherwise, do a headObject so that we can validate checksum if the file was uploaded as a single part */ + return AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT; +} + static bool s_s3_auto_ranged_get_update( struct aws_s3_meta_request *meta_request, uint32_t flags, @@ -142,7 +212,7 @@ static bool s_s3_auto_ranged_get_update( /* auto-ranged-gets make use of body streaming, which will hold onto response bodies if parts earlier in * the file haven't arrived yet. This can potentially create a lot of backed up requests, causing us to * hit our global request limit. To help mitigate this, when the "conservative" flag is passed in, we - * only allow the total amount of requests being sent/streamed to be inside of a set limit. */ + * only allow the total amount of requests being sent/streamed to be inside a set limit. */ if (num_requests_in_flight > s_conservative_max_requests_in_flight) { goto has_work_remaining; } @@ -151,67 +221,110 @@ static bool s_s3_auto_ranged_get_update( /* If the overall range of the object that we are trying to retrieve isn't known yet, then we need to send a * request to figure that out. */ if (!auto_ranged_get->synced_data.object_range_known) { - - /* If there exists a range header or we require validation of the response checksum, we currently always - * do a head request first. 
- * S3 returns the checksum of the entire object from the HEAD response - * - * For the range header value could be parsed client-side, doing so presents a number of - * complications. For example, the given range could be an unsatisfiable range, and might not even - * specify a complete range. To keep things simple, we are currently relying on the service to handle - * turning the Range header into a Content-Range response header.*/ - bool head_object_required = auto_ranged_get->initial_message_has_range_header != 0 || - meta_request->checksum_config.validate_response_checksum; - - if (head_object_required) { - /* If the head object request hasn't been sent yet, then send it now. */ - if (!auto_ranged_get->synced_data.head_object_sent) { + if (auto_ranged_get->synced_data.head_object_sent || + auto_ranged_get->synced_data.num_parts_requested > 0) { + goto has_work_remaining; + } + struct aws_s3_buffer_pool_ticket *ticket = NULL; + switch (s_s3_get_request_type_for_discovering_object_size(meta_request)) { + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT: + AWS_LOGF_INFO( + AWS_LS_S3_META_REQUEST, + "id=%p: Doing a HeadObject to discover the size of the object", + (void *)meta_request); request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT, - 0, - AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY); - - request->discovers_object_size = true; - + AWS_S3_REQUEST_TYPE_HEAD_OBJECT, + 0 /*part_number*/, + AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_get->synced_data.head_object_sent = true; - } - } else if (auto_ranged_get->synced_data.num_parts_requested == 0) { - /* If we aren't using a head object, then discover the size of the object while trying to get the - * first part. 
*/ - request = aws_s3_request_new( - meta_request, - AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART, - 1, - AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY); - - request->part_range_start = 0; - request->part_range_end = meta_request->part_size - 1; - request->discovers_object_size = true; - - ++auto_ranged_get->synced_data.num_parts_requested; - } + break; + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1: + AWS_LOGF_INFO( + AWS_LS_S3_META_REQUEST, + "id=%p: Doing a 'GET_OBJECT_WITH_PART_NUMBER_1' to discover the size of the object and get " + "the first part", + (void *)meta_request); + ticket = aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta_request->part_size); + + if (ticket == NULL) { + goto has_work_remaining; + } - goto has_work_remaining; - } + request = aws_s3_request_new( + meta_request, + AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1, + AWS_S3_REQUEST_TYPE_GET_OBJECT, + 1 /*part_number*/, + AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY); + request->ticket = ticket; + ++auto_ranged_get->synced_data.num_parts_requested; + + break; + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE: + AWS_LOGF_INFO( + AWS_LS_S3_META_REQUEST, + "id=%p: Doing a 'GET_OBJECT_WITH_RANGE' to discover the size of the object and get the " + "first part", + (void *)meta_request); + + uint64_t part_range_start = 0; + uint64_t first_part_size = meta_request->part_size; + if (auto_ranged_get->initial_message_has_range_header) { + /* + * Currently, we only discover the size of the object when the initial range header includes + * a start-range. If we ever implement skipping the HeadRequest for a Range request without + * a start-range, this will need to update. 
+ */ + AWS_ASSERT(auto_ranged_get->initial_message_has_start_range); + part_range_start = auto_ranged_get->initial_range_start; + + if (auto_ranged_get->initial_message_has_end_range) { + first_part_size = aws_min_u64( + first_part_size, + auto_ranged_get->initial_range_end - auto_ranged_get->initial_range_start + 1); + } + + auto_ranged_get->synced_data.first_part_size = first_part_size; + } + AWS_LOGF_INFO( + AWS_LS_S3_META_REQUEST, + "id=%p: Doing a ranged get to discover the size of the object and get the first part", + (void *)meta_request); + + if (first_part_size >= s_min_size_response_for_pooling) { + /* Note: explicitly reserving the whole part size + * even if expect to receive less data. Pool will + * reserve the whole part size for it anyways, so no + * reason getting a smaller chunk. */ + ticket = aws_s3_buffer_pool_reserve( + meta_request->client->buffer_pool, (size_t)meta_request->part_size); + + if (ticket == NULL) { + goto has_work_remaining; + } + } else { + ticket = NULL; + } - /* If the object range is known and that range is empty, then we have an empty file to request. 
*/ - if (auto_ranged_get->synced_data.object_range_start == 0 && - auto_ranged_get->synced_data.object_range_end == 0) { - if (auto_ranged_get->synced_data.get_without_range_sent) { - if (auto_ranged_get->synced_data.get_without_range_completed) { - goto no_work_remaining; - } else { - goto has_work_remaining; - } + request = aws_s3_request_new( + meta_request, + AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE, + AWS_S3_REQUEST_TYPE_GET_OBJECT, + 1 /*part_number*/, + AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY); + request->ticket = ticket; + request->part_range_start = part_range_start; + request->part_range_end = part_range_start + first_part_size - 1; /* range-end is inclusive */ + ++auto_ranged_get->synced_data.num_parts_requested; + break; + default: + AWS_FATAL_ASSERT( + 0 && "s_s3_get_request_type_for_discovering_object_size returned unexpected discover " + "object size request type"); } - request = aws_s3_request_new( - meta_request, - AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_INITIAL_MESSAGE, - 0, - AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); - - auto_ranged_get->synced_data.get_without_range_sent = true; + request->discovers_object_size = true; goto has_work_remaining; } @@ -247,16 +360,27 @@ static bool s_s3_auto_ranged_get_update( auto_ranged_get->synced_data.read_window_warning_issued = 0; } + struct aws_s3_buffer_pool_ticket *ticket = + aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta_request->part_size); + + if (ticket == NULL) { + goto has_work_remaining; + } + request = aws_s3_request_new( meta_request, - AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART, - auto_ranged_get->synced_data.num_parts_requested + 1, + AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE, + AWS_S3_REQUEST_TYPE_GET_OBJECT, + auto_ranged_get->synced_data.num_parts_requested + 1 /*part_number*/, AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY); - aws_s3_get_part_range( + request->ticket = ticket; + + 
aws_s3_calculate_auto_ranged_get_part_range( auto_ranged_get->synced_data.object_range_start, auto_ranged_get->synced_data.object_range_end, meta_request->part_size, + auto_ranged_get->synced_data.first_part_size, request->part_number, &request->part_range_start, &request->part_range_end); @@ -283,11 +407,6 @@ static bool s_s3_auto_ranged_get_update( goto has_work_remaining; } - if (auto_ranged_get->synced_data.get_without_range_sent && - !auto_ranged_get->synced_data.get_without_range_completed) { - goto has_work_remaining; - } - /* If some parts are still being delivered to the caller, then wait for those to finish. */ if (meta_request->synced_data.num_parts_delivery_completed < meta_request->synced_data.num_parts_delivery_sent) { @@ -311,12 +430,16 @@ static bool s_s3_auto_ranged_get_update( } no_work_remaining: + /* If some events are still being delivered to caller, then wait for those to finish */ + if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) { + work_remaining = true; + } if (!work_remaining) { aws_s3_meta_request_set_success_synced(meta_request, s_s3_auto_ranged_get_success_status(meta_request)); if (auto_ranged_get->synced_data.num_parts_checksum_validated == auto_ranged_get->synced_data.num_parts_requested) { - /* If we have validated the checksum for every parts, we set the meta request level checksum validation + /* If we have validated the checksum for every part, we set the meta request level checksum validation * result.*/ meta_request->synced_data.finish_result.did_validate = true; meta_request->synced_data.finish_result.validation_algorithm = auto_ranged_get->validation_algorithm; @@ -337,17 +460,18 @@ static bool s_s3_auto_ranged_get_update( return work_remaining; } -/* Given a request, prepare it for sending based on its description. 
*/ -static int s_s3_auto_ranged_get_prepare_request( - struct aws_s3_meta_request *meta_request, - struct aws_s3_request *request) { - AWS_PRECONDITION(meta_request); +/* Given a request, prepare it for sending based on its description. + * Currently, this is actually synchronous. */ +static struct aws_future_void *s_s3_auto_ranged_get_prepare_request(struct aws_s3_request *request) { AWS_PRECONDITION(request); + struct aws_s3_meta_request *meta_request = request->meta_request; /* Generate a new ranged get request based on the original message. */ struct aws_http_message *message = NULL; struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; + bool success = false; + switch (request->request_tag) { case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT: /* A head object will be a copy of the original headers but with a HEAD request method. */ @@ -357,16 +481,20 @@ static int s_s3_auto_ranged_get_prepare_request( aws_http_message_set_request_method(message, g_head_method); } break; - case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART: + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE: message = aws_s3_ranged_get_object_message_new( meta_request->allocator, meta_request->initial_request_message, request->part_range_start, request->part_range_end); break; - case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_INITIAL_MESSAGE: + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1: message = aws_s3_message_util_copy_http_message_no_body_all_headers( meta_request->allocator, meta_request->initial_request_message); + if (message) { + aws_s3_message_util_set_multipart_request_path( + meta_request->allocator, NULL, request->part_number, false, message); + } break; } @@ -376,7 +504,7 @@ static int s_s3_auto_ranged_get_prepare_request( "id=%p Could not create message for request with tag %d for auto-ranged-get meta request.", (void *)meta_request, request->request_tag); - goto message_alloc_failed; + goto finish; } if 
(meta_request->checksum_config.validate_response_checksum) { aws_http_headers_set(aws_http_message_get_headers(message), g_request_validation_mode, g_enabled); @@ -398,21 +526,30 @@ static int s_s3_auto_ranged_get_prepare_request( aws_s3_request_setup_send_data(request, message); aws_http_message_release(message); + /* Success! */ AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, - "id=%p: Created request %p for part %d", + "id=%p: Created request %p for part %d part sized %d", (void *)meta_request, (void *)request, - request->part_number); + request->part_number, + request->has_part_size_response_body); - return AWS_OP_SUCCESS; + success = true; -message_alloc_failed: - - return AWS_OP_ERR; +finish:; + struct aws_future_void *future = aws_future_void_new(meta_request->allocator); + if (success) { + aws_future_void_set_result(future); + } else { + aws_future_void_set_error(future, aws_last_error_or_unknown()); + } + return future; } -/* Check the finish result of meta request, in case of the request failed because of downloading an empty file */ +/* Check the finish result of meta request. + * Return true if the request failed because it downloaded an empty file. 
+ * Return false if the request failed for any other reason */ static bool s_check_empty_file_download_error(struct aws_s3_request *failed_request) { struct aws_http_headers *failed_headers = failed_request->send_data.response_headers; struct aws_byte_buf failed_body = failed_request->send_data.response_body; @@ -423,12 +560,11 @@ static bool s_check_empty_file_download_error(struct aws_s3_request *failed_requ /* Content type found */ if (aws_byte_cursor_eq_ignore_case(&content_type, &g_application_xml_value)) { /* XML response */ - struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&failed_body); - struct aws_string *size = - aws_xml_get_top_level_tag(failed_request->allocator, &g_object_size_value, &body_cursor); - bool check_size = aws_string_eq_c_str(size, "0"); - aws_string_destroy(size); - if (check_size) { + struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&failed_body); + const char *path_to_size[] = {"Error", "ActualObjectSize", NULL}; + struct aws_byte_cursor size = {0}; + aws_xml_get_body_at_path(failed_request->allocator, xml_doc, path_to_size, &size); + if (aws_byte_cursor_eq_c_str(&size, "0")) { return true; } } @@ -437,22 +573,28 @@ static bool s_check_empty_file_download_error(struct aws_s3_request *failed_requ return false; } -static int s_discover_object_range_and_content_length( +static int s_discover_object_range_and_size( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code, - uint64_t *out_total_content_length, uint64_t *out_object_range_start, - uint64_t *out_object_range_end) { - AWS_PRECONDITION(out_total_content_length); + uint64_t *out_object_range_end, + uint64_t *out_object_size, + uint64_t *out_first_part_size, + bool *out_empty_file_error) { + + AWS_PRECONDITION(out_object_size); AWS_PRECONDITION(out_object_range_start); AWS_PRECONDITION(out_object_range_end); + AWS_PRECONDITION(out_first_part_size); int result = AWS_OP_ERR; - uint64_t total_content_length = 0; + uint64_t 
content_length = 0; + uint64_t object_size = 0; uint64_t object_range_start = 0; uint64_t object_range_end = 0; + uint64_t first_part_size = 0; AWS_ASSERT(request->discovers_object_size); struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; @@ -466,7 +608,7 @@ static int s_discover_object_range_and_content_length( /* There should be a Content-Length header that indicates the total size of the range.*/ if (aws_s3_parse_content_length_response_header( - meta_request->allocator, request->send_data.response_headers, &total_content_length)) { + meta_request->allocator, request->send_data.response_headers, &content_length)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, @@ -477,16 +619,19 @@ static int s_discover_object_range_and_content_length( } /* if the inital message had a ranged header, there should also be a Content-Range header that specifies the - * object range and total object size. Otherwise the size and range should be equal to the + * object range and total object size. Otherwise, the size and range should be equal to the * total_content_length. */ if (!auto_ranged_get->initial_message_has_range_header) { - object_range_end = total_content_length - 1; + object_size = content_length; + if (content_length > 0) { + object_range_end = content_length - 1; /* range-end is inclusive */ + } } else if (aws_s3_parse_content_range_response_header( meta_request->allocator, request->send_data.response_headers, &object_range_start, &object_range_end, - NULL)) { + &object_size)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, @@ -498,22 +643,58 @@ static int s_discover_object_range_and_content_length( result = AWS_OP_SUCCESS; break; - case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART: + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1: + AWS_ASSERT(request->part_number == 1); + AWS_ASSERT(request->send_data.response_headers != NULL); + /* There should be a Content-Length header that indicates the size of first part. 
*/ + if (aws_s3_parse_content_length_response_header( + meta_request->allocator, request->send_data.response_headers, &content_length)) { + + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Could not find content-length header for request %p", + (void *)meta_request, + (void *)request); + break; + } + first_part_size = content_length; + + if (first_part_size > 0) { + /* Parse the object size from the part response. */ + if (aws_s3_parse_content_range_response_header( + meta_request->allocator, request->send_data.response_headers, NULL, NULL, &object_size)) { + + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Could not find content-range header for request %p", + (void *)meta_request, + (void *)request); + break; + } + /* When discovering the object size via GET_OBJECT_WITH_PART_NUMBER_1, the object range is the entire + * object. */ + object_range_start = 0; + object_range_end = object_size - 1; /* range-end is inclusive */ + } + + result = AWS_OP_SUCCESS; + break; + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE: AWS_ASSERT(request->part_number == 1); if (error_code != AWS_ERROR_SUCCESS) { /* If we hit an empty file while trying to discover the object-size via part, then this request failure * is as designed. */ - if (s_check_empty_file_download_error(request)) { + if (!auto_ranged_get->initial_message_has_range_header && s_check_empty_file_download_error(request)) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Detected empty file with request %p. Sending new request without range header.", (void *)meta_request, (void *)request); - total_content_length = 0ULL; - + object_size = 0ULL; + *out_empty_file_error = true; result = AWS_OP_SUCCESS; } else { /* Otherwise, resurface the error code. */ @@ -526,7 +707,11 @@ static int s_discover_object_range_and_content_length( /* Parse the object size from the part response. 
*/ if (aws_s3_parse_content_range_response_header( - meta_request->allocator, request->send_data.response_headers, NULL, NULL, &total_content_length)) { + meta_request->allocator, + request->send_data.response_headers, + &object_range_start, + &object_range_end, + &object_size)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, @@ -536,11 +721,17 @@ static int s_discover_object_range_and_content_length( break; } - - /* When discovering the object size via first-part, the object range is the entire object. */ - object_range_start = 0; - object_range_end = total_content_length - 1; - + if (auto_ranged_get->initial_message_has_range_header) { + if (auto_ranged_get->initial_message_has_end_range) { + object_range_end = aws_min_u64(object_size - 1, auto_ranged_get->initial_range_end); + } else { + object_range_end = object_size - 1; + } + } else { + /* When discovering the object size via GET_OBJECT_WITH_RANGE, the object range is the entire object. */ + object_range_start = 0; + object_range_end = object_size - 1; /* range-end is inclusive */ + } result = AWS_OP_SUCCESS; break; default: @@ -549,9 +740,10 @@ static int s_discover_object_range_and_content_length( } if (result == AWS_OP_SUCCESS) { - *out_total_content_length = total_content_length; + *out_object_size = object_size; *out_object_range_start = object_range_start; *out_object_range_end = object_range_end; + *out_first_part_size = first_part_size; } return result; @@ -568,25 +760,33 @@ static void s_s3_auto_ranged_get_request_finished( struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; AWS_PRECONDITION(auto_ranged_get); - uint64_t total_content_length = 0ULL; uint64_t object_range_start = 0ULL; uint64_t object_range_end = 0ULL; + uint64_t object_size = 0ULL; + uint64_t first_part_size = 0ULL; bool found_object_size = false; bool request_failed = error_code != AWS_ERROR_SUCCESS; + bool first_part_size_mismatch = (error_code == AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE); + bool 
empty_file_error = false; if (request->discovers_object_size) { - - /* Try to discover the object-range and content length.*/ - if (s_discover_object_range_and_content_length( - meta_request, request, error_code, &total_content_length, &object_range_start, &object_range_end)) { + /* Try to discover the object-range and object-size.*/ + if (s_discover_object_range_and_size( + meta_request, + request, + error_code, + &object_range_start, + &object_range_end, + &object_size, + &first_part_size, + &empty_file_error)) { error_code = aws_last_error_or_unknown(); goto update_synced_data; } - - if (!request_failed && !auto_ranged_get->initial_message_has_if_match_header) { + if ((!request_failed || first_part_size_mismatch) && !auto_ranged_get->initial_message_has_if_match_header) { AWS_ASSERT(auto_ranged_get->etag == NULL); struct aws_byte_cursor etag_header_value; @@ -609,22 +809,37 @@ static void s_s3_auto_ranged_get_request_finished( error_code = AWS_ERROR_SUCCESS; found_object_size = true; - if (meta_request->headers_callback != NULL) { + if (!empty_file_error && meta_request->headers_callback != NULL) { struct aws_http_headers *response_headers = aws_http_headers_new(meta_request->allocator); copy_http_headers(request->send_data.response_headers, response_headers); - /* If this request is a part, then the content range isn't applicable. */ - if (request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART) { - /* For now, we can assume that discovery of size via the first part of the object does not apply to - * breaking up a ranged request. If it ever does, then we will need to repopulate this header. 
*/ - AWS_ASSERT(!auto_ranged_get->initial_message_has_range_header); - - aws_http_headers_erase(response_headers, g_content_range_header_name); + if (request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE || + request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1) { + + if (auto_ranged_get->initial_message_has_range_header) { + /* Populate the header with object_range */ + char content_range_buffer[64] = ""; + snprintf( + content_range_buffer, + sizeof(content_range_buffer), + "bytes %" PRIu64 "-%" PRIu64 "/%" PRIu64, + object_range_start, + object_range_end, + object_size); + aws_http_headers_set( + response_headers, + g_content_range_header_name, + aws_byte_cursor_from_c_str(content_range_buffer)); + } else { + /* content range isn't applicable. */ + aws_http_headers_erase(response_headers, g_content_range_header_name); + } } + uint64_t content_length = object_size ? object_range_end - object_range_start + 1 : 0; char content_length_buffer[64] = ""; - snprintf(content_length_buffer, sizeof(content_length_buffer), "%" PRIu64, total_content_length); + snprintf(content_length_buffer, sizeof(content_length_buffer), "%" PRIu64, content_length); aws_http_headers_set( response_headers, g_content_length_header_name, aws_byte_cursor_from_c_str(content_length_buffer)); @@ -647,16 +862,25 @@ update_synced_data: /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); + bool finishing_metrics = true; /* If the object range was found, then record it. 
*/ if (found_object_size) { AWS_ASSERT(!auto_ranged_get->synced_data.object_range_known); - auto_ranged_get->synced_data.object_range_known = true; + auto_ranged_get->synced_data.object_range_empty = (object_size == 0); auto_ranged_get->synced_data.object_range_start = object_range_start; auto_ranged_get->synced_data.object_range_end = object_range_end; - auto_ranged_get->synced_data.total_num_parts = - aws_s3_get_num_parts(meta_request->part_size, object_range_start, object_range_end); + if (!first_part_size_mismatch && first_part_size) { + auto_ranged_get->synced_data.first_part_size = first_part_size; + } + if (auto_ranged_get->synced_data.object_range_empty == 0) { + auto_ranged_get->synced_data.total_num_parts = aws_s3_calculate_auto_ranged_get_num_parts( + meta_request->part_size, + auto_ranged_get->synced_data.first_part_size, + object_range_start, + object_range_end); + } } switch (request->request_tag) { @@ -664,7 +888,27 @@ update_synced_data: auto_ranged_get->synced_data.head_object_completed = true; AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Head object completed.", (void *)meta_request); break; - case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART: + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1: + AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Get Part Number completed.", (void *)meta_request); + if (first_part_size_mismatch && found_object_size) { + /* We canceled GET_OBJECT_WITH_PART_NUMBER_1 request because the Content-Length was bigger than + * part_size. Try to fetch the first part again as a ranged get */ + auto_ranged_get->synced_data.num_parts_requested = 0; + break; + } + /* fall through */ + case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE: + if (empty_file_error) { + /* + * Try to download the object again using GET_OBJECT_WITH_PART_NUMBER_1. If the file is still + * empty, successful response headers will be provided to users. If not, the newer version of the + * file will be downloaded. 
+ */ + auto_ranged_get->synced_data.num_parts_requested = 0; + auto_ranged_get->synced_data.object_range_known = 0; + break; + } + ++auto_ranged_get->synced_data.num_parts_completed; if (!request_failed) { @@ -680,7 +924,23 @@ update_synced_data: } ++auto_ranged_get->synced_data.num_parts_successful; + /* Send progress_callback for delivery on io_event_loop thread */ + if (meta_request->progress_callback != NULL) { + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; + event.u.progress.info.bytes_transferred = request->send_data.response_body.len; + if (auto_ranged_get->synced_data.object_range_empty) { + event.u.progress.info.content_length = 0; + } else { + /* Note that range-end is inclusive */ + event.u.progress.info.content_length = auto_ranged_get->synced_data.object_range_end + 1 - + auto_ranged_get->synced_data.object_range_start; + } + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); + } + aws_s3_meta_request_stream_response_body_synced(meta_request, request); + /* The body of the request is queued to be streamed, don't finish the metrics yet. 
*/ + finishing_metrics = false; AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, @@ -693,11 +953,6 @@ update_synced_data: ++auto_ranged_get->synced_data.num_parts_failed; } break; - case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_INITIAL_MESSAGE: - AWS_LOGF_DEBUG( - AWS_LS_S3_META_REQUEST, "id=%p Get of file using initial message completed.", (void *)meta_request); - auto_ranged_get->synced_data.get_without_range_completed = true; - break; } if (error_code != AWS_ERROR_SUCCESS) { @@ -715,7 +970,9 @@ update_synced_data: meta_request->synced_data.finish_result.validation_algorithm = request->validation_algorithm; } } - + if (finishing_metrics) { + aws_s3_request_finish_up_metrics_synced(request, meta_request); + } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c b/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c index 0386f86b04c..0b29421add3 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c @@ -8,13 +8,35 @@ #include "aws/s3/private/s3_list_parts.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" +#include <aws/common/clock.h> #include <aws/common/encoding.h> #include <aws/common/string.h> #include <aws/io/stream.h> -static const struct aws_byte_cursor s_upload_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("UploadId"); +/* TODO: better logging of steps */ + static const size_t s_complete_multipart_upload_init_body_size_bytes = 512; static const size_t s_abort_multipart_upload_init_body_size_bytes = 512; +/* For unknown length body we no longer know the number of parts. to avoid + * resizing arrays for etags/checksums too much, those array start out with + * capacity specified by the constant below. Note: constant has been arbitrary + * picked to avoid using allocations and using too much memory. might change in future. 
+ */ +static const uint32_t s_unknown_length_default_num_parts = 32; + +/* Max number of parts (per meta-request) that can be: "started, but not done reading from stream". + * Though reads are serial (only 1 part can be reading from stream at a time) + * we may queue up more to minimize delays between each read. + * + * If this number is too low, there could be an avoidable delay between each read + * (meta-request ready for more work, but client hasn't run update and given it more work yet) + * + * If this number is too high, early meta-requests could hog all the "work tokens" + * (1st meta-request as queue of 100 "work tokens" that it needs to read + * the stream for, while later meta-requests are doing nothing waiting for work tokens) + * + * TODO: this value needs further benchmarking. */ +static const uint32_t s_max_parts_pending_read = 5; static const struct aws_byte_cursor s_create_multipart_upload_copy_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), @@ -22,35 +44,86 @@ static const struct aws_byte_cursor s_create_multipart_upload_copy_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), }; +/* Data for aws_s3_auto_ranged_put's async vtable->prepare_request() job */ +struct aws_s3_auto_ranged_put_prepare_request_job { + struct aws_allocator *allocator; + struct aws_s3_request *request; + /* async step: prepare type-specific message */ + struct aws_future_http_message *asyncstep_prepare_message; + /* future to set when this job completes */ + struct aws_future_void *on_complete; +}; + +/* Data for async preparation of an UploadPart request */ +struct aws_s3_prepare_upload_part_job { + struct aws_allocator *allocator; + struct aws_s3_request *request; + /* async step: read this part from input stream */ + struct aws_future_bool *asyncstep_read_part; + /* future to set when this job completes */ + struct aws_future_http_message *on_complete; +}; + +/* Data for async 
preparation of a CompleteMultipartUpload request */ +struct aws_s3_prepare_complete_multipart_upload_job { + struct aws_allocator *allocator; + struct aws_s3_request *request; + /* future to set when this job completes */ + struct aws_future_http_message *on_complete; +}; + static void s_s3_meta_request_auto_ranged_put_destroy(struct aws_s3_meta_request *meta_request); +static void s_s3_auto_ranged_put_send_request_finish( + struct aws_s3_connection *connection, + struct aws_http_stream *stream, + int error_code); + static bool s_s3_auto_ranged_put_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request); -static int s_s3_auto_ranged_put_prepare_request( - struct aws_s3_meta_request *meta_request, - struct aws_s3_request *request); +static struct aws_future_void *s_s3_auto_ranged_put_prepare_request(struct aws_s3_request *request); +static void s_s3_auto_ranged_put_prepare_request_finish(void *user_data); + +static struct aws_future_http_message *s_s3_prepare_list_parts(struct aws_s3_request *request); + +static struct aws_future_http_message *s_s3_prepare_create_multipart_upload(struct aws_s3_request *request); + +static struct aws_future_http_message *s_s3_prepare_upload_part(struct aws_s3_request *request); +static void s_s3_prepare_upload_part_on_read_done(void *user_data); +static void s_s3_prepare_upload_part_finish(struct aws_s3_prepare_upload_part_job *part_prep, int error_code); + +static struct aws_future_http_message *s_s3_prepare_complete_multipart_upload(struct aws_s3_request *request); + +static struct aws_future_http_message *s_s3_prepare_abort_multipart_upload(struct aws_s3_request *request); static void s_s3_auto_ranged_put_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code); -static void s_s3_auto_ranged_put_send_request_finish( - struct aws_s3_connection *connection, - struct aws_http_stream *stream, - int error_code); - static int 
s_s3_auto_ranged_put_pause( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_resume_token **resume_token); -static bool s_process_part_info(const struct aws_s3_part_info *info, void *user_data) { +static int s_process_part_info_synced(const struct aws_s3_part_info *info, void *user_data) { struct aws_s3_auto_ranged_put *auto_ranged_put = user_data; + struct aws_s3_meta_request *meta_request = &auto_ranged_put->base; - struct aws_string *etag = aws_strip_quotes(auto_ranged_put->base.allocator, info->e_tag); + ASSERT_SYNCED_DATA_LOCK_HELD(&auto_ranged_put->base); + + if (info->part_number == 0) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, "id=%p: ListParts reported Part without valid PartNumber", (void *)meta_request); + return aws_raise_error(AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED); + } + + struct aws_s3_mpu_part_info *part = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_mpu_part_info)); + part->size = info->size; + part->etag = aws_strip_quotes(meta_request->allocator, info->e_tag); + part->was_previously_uploaded = true; const struct aws_byte_cursor *checksum_cur = NULL; switch (auto_ranged_put->base.checksum_config.checksum_algorithm) { @@ -73,16 +146,23 @@ static bool s_process_part_info(const struct aws_s3_part_info *info, void *user_ break; } - if (checksum_cur) { - aws_byte_buf_init_copy_from_cursor( - &auto_ranged_put->encoded_checksum_list[info->part_number - 1], - auto_ranged_put->base.allocator, - *checksum_cur); + if ((checksum_cur != NULL) && (checksum_cur->len > 0)) { + aws_byte_buf_init_copy_from_cursor(&part->checksum_base64, auto_ranged_put->base.allocator, *checksum_cur); } - aws_array_list_set_at(&auto_ranged_put->synced_data.etag_list, &etag, info->part_number - 1); + /* Parts might be out of order or have gaps in them. + * Resize array-list to be long enough to hold this part, + * filling any intermediate slots with NULL. 
*/ + aws_array_list_ensure_capacity(&auto_ranged_put->synced_data.part_list, info->part_number); + while (aws_array_list_length(&auto_ranged_put->synced_data.part_list) < info->part_number) { + struct aws_s3_mpu_part_info *null_part = NULL; + aws_array_list_push_back(&auto_ranged_put->synced_data.part_list, &null_part); + } + + /* Add this part */ + aws_array_list_set_at(&auto_ranged_put->synced_data.part_list, &part, info->part_number - 1); - return true; + return AWS_OP_SUCCESS; } /* @@ -168,14 +248,17 @@ static int s_try_init_resume_state_from_persisted_data( return AWS_OP_SUCCESS; } + AWS_FATAL_ASSERT(auto_ranged_put->has_content_length); + struct aws_byte_cursor request_path; if (aws_http_message_get_request_path(auto_ranged_put->base.initial_request_message, &request_path)) { AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. Request path could not be read."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } - auto_ranged_put->synced_data.num_parts_sent = 0; + auto_ranged_put->synced_data.num_parts_started = 0; auto_ranged_put->synced_data.num_parts_completed = 0; + auto_ranged_put->synced_data.num_parts_noop = 0; auto_ranged_put->synced_data.create_multipart_upload_sent = true; auto_ranged_put->synced_data.create_multipart_upload_completed = true; auto_ranged_put->upload_id = aws_string_clone_or_reuse(allocator, resume_token->multipart_upload_id); @@ -183,7 +266,7 @@ static int s_try_init_resume_state_from_persisted_data( struct aws_s3_list_parts_params list_parts_params = { .key = request_path, .upload_id = aws_byte_cursor_from_string(auto_ranged_put->upload_id), - .on_part = s_process_part_info, + .on_part = s_process_part_info_synced, .user_data = auto_ranged_put, }; @@ -191,10 +274,10 @@ static int s_try_init_resume_state_from_persisted_data( struct aws_http_headers *needed_response_headers = aws_http_headers_new(allocator); const size_t copy_header_count = AWS_ARRAY_SIZE(s_create_multipart_upload_copy_headers); - struct 
aws_http_headers *initial_headers = + const struct aws_http_headers *initial_headers = aws_http_message_get_headers(auto_ranged_put->base.initial_request_message); - /* Copy headers that would have been used for create multi part from initial message, since create will never be + /* Copy headers that would have been used for create multipart from initial message, since create will never be * called in this flow */ for (size_t header_index = 0; header_index < copy_header_count; ++header_index) { const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index]; @@ -228,6 +311,7 @@ struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new( struct aws_allocator *allocator, struct aws_s3_client *client, size_t part_size, + bool has_content_length, uint64_t content_length, uint32_t num_parts, const struct aws_s3_meta_request_options *options) { @@ -237,7 +321,6 @@ struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new( AWS_PRECONDITION(client); AWS_PRECONDITION(options); AWS_PRECONDITION(options->message); - AWS_PRECONDITION(aws_http_message_get_body_stream(options->message)); if (s_try_update_part_info_from_resume_token(content_length, options->resume_token, &part_size, &num_parts)) { return NULL; @@ -260,20 +343,21 @@ struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new( return NULL; } - auto_ranged_put->content_length = content_length; - auto_ranged_put->synced_data.total_num_parts = num_parts; + auto_ranged_put->has_content_length = has_content_length; + auto_ranged_put->content_length = has_content_length ? content_length : 0; + auto_ranged_put->total_num_parts_from_content_length = has_content_length ? 
num_parts : 0; auto_ranged_put->upload_id = NULL; auto_ranged_put->resume_token = options->resume_token; aws_s3_meta_request_resume_token_acquire(auto_ranged_put->resume_token); auto_ranged_put->threaded_update_data.next_part_number = 1; - auto_ranged_put->prepare_data.num_parts_read_from_stream = 0; + auto_ranged_put->synced_data.is_body_stream_at_end = false; + + uint32_t initial_num_parts = auto_ranged_put->has_content_length ? num_parts : s_unknown_length_default_num_parts; - struct aws_string **etag_c_array = aws_mem_calloc(allocator, sizeof(struct aws_string *), num_parts); - aws_array_list_init_static( - &auto_ranged_put->synced_data.etag_list, etag_c_array, num_parts, sizeof(struct aws_string *)); - auto_ranged_put->encoded_checksum_list = aws_mem_calloc(allocator, sizeof(struct aws_byte_buf), num_parts); + aws_array_list_init_dynamic( + &auto_ranged_put->synced_data.part_list, allocator, initial_num_parts, sizeof(struct aws_s3_mpu_part_info *)); if (s_try_init_resume_state_from_persisted_data(allocator, auto_ranged_put, options->resume_token)) { goto error_clean_up; @@ -303,25 +387,66 @@ static void s_s3_meta_request_auto_ranged_put_destroy(struct aws_s3_meta_request aws_s3_paginated_operation_release(auto_ranged_put->synced_data.list_parts_operation); - for (size_t etag_index = 0; etag_index < auto_ranged_put->synced_data.total_num_parts; ++etag_index) { - struct aws_string *etag = NULL; + for (size_t part_index = 0; part_index < aws_array_list_length(&auto_ranged_put->synced_data.part_list); + ++part_index) { - aws_array_list_get_at(&auto_ranged_put->synced_data.etag_list, &etag, etag_index); - aws_string_destroy(etag); + struct aws_s3_mpu_part_info *part; + aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); + if (part != NULL) { + aws_byte_buf_clean_up(&part->checksum_base64); + aws_string_destroy(part->etag); + aws_mem_release(auto_ranged_put->base.allocator, part); + } } + 
aws_array_list_clean_up(&auto_ranged_put->synced_data.part_list); aws_string_destroy(auto_ranged_put->synced_data.list_parts_continuation_token); - for (size_t checksum_index = 0; checksum_index < auto_ranged_put->synced_data.total_num_parts; ++checksum_index) { - aws_byte_buf_clean_up(&auto_ranged_put->encoded_checksum_list[checksum_index]); - } - aws_mem_release(meta_request->allocator, auto_ranged_put->synced_data.etag_list.data); - aws_mem_release(meta_request->allocator, auto_ranged_put->encoded_checksum_list); - aws_array_list_clean_up(&auto_ranged_put->synced_data.etag_list); aws_http_headers_release(auto_ranged_put->synced_data.needed_response_headers); aws_mem_release(meta_request->allocator, auto_ranged_put); } +/* Check flags and corresponding conditions to see if any more parts can be + * scheduled during this pass. */ +static bool s_should_skip_scheduling_more_parts_based_on_flags( + const struct aws_s3_auto_ranged_put *auto_ranged_put, + uint32_t flags) { + + /* If the stream is actually async, only allow 1 pending-read. + * We need to wait for async read() to complete before calling it again. */ + if (auto_ranged_put->base.request_body_async_stream != NULL) { + return auto_ranged_put->synced_data.num_parts_pending_read > 0; + } + + /* If doing async-writes, only allow a new part if there's a pending write-future, + * and no pending-reads yet to copy that data. */ + if (auto_ranged_put->base.request_body_using_async_writes == true) { + return (auto_ranged_put->base.synced_data.async_write.future == NULL) || + (auto_ranged_put->synced_data.num_parts_pending_read > 0); + } + + /* If this is the conservative pass, only allow 1 pending-read. + * Reads are serial anyway, so queuing up a whole bunch isn't necessarily a speedup. 
*/ + if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) { + return auto_ranged_put->synced_data.num_parts_pending_read > 0; + } + + /* In all other cases, cap the number of pending-reads to something reasonable */ + return auto_ranged_put->synced_data.num_parts_pending_read >= s_max_parts_pending_read; +} + +static void s_s3_auto_ranged_put_send_request_finish( + struct aws_s3_connection *connection, + struct aws_http_stream *stream, + int error_code) { + struct aws_s3_request *request = connection->request; + if (request->request_tag == AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART) { + /* TODO: the single part upload may also be improved from a timeout as multipart. */ + aws_s3_client_update_upload_part_timeout(request->meta_request->client, request, error_code); + } + aws_s3_meta_request_send_request_finish_default(connection, stream, error_code); +} + static bool s_s3_auto_ranged_put_update( struct aws_s3_meta_request *meta_request, uint32_t flags, @@ -339,12 +464,13 @@ static bool s_s3_auto_ranged_put_update( aws_s3_meta_request_lock_synced_data(meta_request); if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) { - /* If resuming and list part has not be sent, do it now. */ + /* If resuming and list part has not been sent, do it now. 
*/ if (!auto_ranged_put->synced_data.list_parts_state.started) { request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS, - 0, + AWS_S3_REQUEST_TYPE_LIST_PARTS, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_put->synced_data.list_parts_state.started = true; @@ -358,7 +484,8 @@ static bool s_s3_auto_ranged_put_update( request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS, - 0, + AWS_S3_REQUEST_TYPE_LIST_PARTS, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_put->synced_data.list_parts_state.continues = false; goto has_work_remaining; @@ -374,7 +501,8 @@ static bool s_s3_auto_ranged_put_update( request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD, - 0, + AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_put->synced_data.create_multipart_upload_sent = true; @@ -388,69 +516,86 @@ static bool s_s3_auto_ranged_put_update( goto has_work_remaining; } - /* If we haven't sent all of the parts yet, then set up to send a new part now. */ - if (auto_ranged_put->synced_data.num_parts_sent < auto_ranged_put->synced_data.total_num_parts) { + bool should_create_next_part_request = false; + bool request_previously_uploaded = false; + if (auto_ranged_put->has_content_length && (auto_ranged_put->synced_data.num_parts_started < + auto_ranged_put->total_num_parts_from_content_length)) { + /* Check if next part was previously uploaded (due to resume) */ + size_t part_index = auto_ranged_put->threaded_update_data.next_part_number - 1; + + struct aws_s3_mpu_part_info *part = NULL; + aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); + if (part != NULL) { + AWS_ASSERT(part->was_previously_uploaded == true); + /* This part has been uploaded. 
*/ + request_previously_uploaded = true; + } - /* Check if the etag/checksum list has the result already */ - int part_index = auto_ranged_put->threaded_update_data.next_part_number - 1; - for (size_t etag_index = part_index; - etag_index < aws_array_list_length(&auto_ranged_put->synced_data.etag_list); - ++etag_index) { - struct aws_string *etag = NULL; + if (s_should_skip_scheduling_more_parts_based_on_flags(auto_ranged_put, flags)) { + goto has_work_remaining; + } - if (!aws_array_list_get_at(&auto_ranged_put->synced_data.etag_list, &etag, etag_index) && etag) { - /* part already downloaded, skip it here and prepare will take care of adjusting the buffer */ - ++auto_ranged_put->threaded_update_data.next_part_number; + should_create_next_part_request = true; - } else { - // incomplete part found. break out and create request for it. - break; - } + } else if (!auto_ranged_put->has_content_length && !auto_ranged_put->synced_data.is_body_stream_at_end) { + + if (s_should_skip_scheduling_more_parts_based_on_flags(auto_ranged_put, flags)) { + goto has_work_remaining; } - // Something went really wrong. we still have parts to send, but have etags for all parts - AWS_FATAL_ASSERT( - auto_ranged_put->threaded_update_data.next_part_number <= - auto_ranged_put->synced_data.total_num_parts); + should_create_next_part_request = true; + } - if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) { - uint32_t num_parts_in_flight = - (auto_ranged_put->synced_data.num_parts_sent - - auto_ranged_put->synced_data.num_parts_completed); + if (should_create_next_part_request) { - /* Because uploads must read from their streams serially, we try to limit the amount of in flight - * requests for a given multipart upload if we can. */ - if (num_parts_in_flight > 0) { - goto has_work_remaining; - } - } + struct aws_s3_buffer_pool_ticket *ticket = + aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta_request->part_size); - /* Allocate a request for another part. 
*/ - request = aws_s3_request_new( - meta_request, - AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART, - 0, - AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); + if (ticket != NULL) { + /* Allocate a request for another part. */ + request = aws_s3_request_new( + meta_request, + AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART, + AWS_S3_REQUEST_TYPE_UPLOAD_PART, + 0 /*part_number*/, + AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_REQUEST_BODY); - request->part_number = auto_ranged_put->threaded_update_data.next_part_number; + request->part_number = auto_ranged_put->threaded_update_data.next_part_number; - ++auto_ranged_put->threaded_update_data.next_part_number; - ++auto_ranged_put->synced_data.num_parts_sent; + /* If request was previously uploaded, we prepare it to ensure checksums still match, + * but ultimately it gets marked no-op and we don't send it */ + request->was_previously_uploaded = request_previously_uploaded; - AWS_LOGF_DEBUG( - AWS_LS_S3_META_REQUEST, - "id=%p: Returning request %p for part %d", - (void *)meta_request, - (void *)request, - request->part_number); + request->ticket = ticket; + + ++auto_ranged_put->threaded_update_data.next_part_number; + ++auto_ranged_put->synced_data.num_parts_started; + ++auto_ranged_put->synced_data.num_parts_pending_read; + + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p: Returning request %p for part %d", + (void *)meta_request, + (void *)request, + request->part_number); + } goto has_work_remaining; } - /* There is one more request to send after all of the parts (the complete-multipart-upload) but it can't be - * done until all of the parts have been completed.*/ - if (auto_ranged_put->synced_data.num_parts_completed != auto_ranged_put->synced_data.total_num_parts) { - goto has_work_remaining; + /* There is one more request to send after all the parts (the complete-multipart-upload) but it can't be + * done until all the parts have been completed.*/ + if (auto_ranged_put->has_content_length) { + if 
(auto_ranged_put->synced_data.num_parts_completed != + auto_ranged_put->total_num_parts_from_content_length) { + goto has_work_remaining; + } + } else { + if ((!auto_ranged_put->synced_data.is_body_stream_at_end) || + auto_ranged_put->synced_data.num_parts_completed != + auto_ranged_put->synced_data.num_parts_started) { + goto has_work_remaining; + } } /* If the complete-multipart-upload request hasn't been set yet, then send it now. */ @@ -458,7 +603,8 @@ static bool s_s3_auto_ranged_put_update( request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD, - 0, + AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_put->synced_data.complete_multipart_upload_sent = true; @@ -486,7 +632,7 @@ static bool s_s3_auto_ranged_put_update( /* If the number of parts completed is less than the number of parts sent, then we need to wait until all of * those parts are done sending before aborting. 
*/ - if (auto_ranged_put->synced_data.num_parts_completed < auto_ranged_put->synced_data.num_parts_sent) { + if (auto_ranged_put->synced_data.num_parts_completed < auto_ranged_put->synced_data.num_parts_started) { goto has_work_remaining; } @@ -523,7 +669,8 @@ static bool s_s3_auto_ranged_put_update( request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD, - 0, + AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_ALWAYS_SEND); auto_ranged_put->synced_data.abort_multipart_upload_sent = true; @@ -543,9 +690,13 @@ static bool s_s3_auto_ranged_put_update( work_remaining = true; no_work_remaining: + /* If some events are still being delivered to caller, then wait for those to finish */ + if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) { + work_remaining = true; + } if (!work_remaining) { - aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS); + aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK); } aws_s3_meta_request_unlock_synced_data(meta_request); @@ -568,29 +719,34 @@ static bool s_s3_auto_ranged_put_update( * Basically returns either part size or if content is not equally divisible into parts, the size of the remaining last * part. */ -static size_t s_compute_request_body_size(struct aws_s3_meta_request *meta_request, uint32_t part_number) { +static size_t s_compute_request_body_size( + const struct aws_s3_meta_request *meta_request, + uint32_t part_number, + uint64_t *offset_out) { AWS_PRECONDITION(meta_request); - struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; + const struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; size_t request_body_size = meta_request->part_size; /* Last part--adjust size to match remaining content length. 
*/ - if (part_number == auto_ranged_put->synced_data.total_num_parts) { + if (auto_ranged_put->has_content_length && part_number == auto_ranged_put->total_num_parts_from_content_length) { size_t content_remainder = (size_t)(auto_ranged_put->content_length % (uint64_t)meta_request->part_size); if (content_remainder > 0) { request_body_size = content_remainder; } } + /* The part_number starts at 1 */ + *offset_out = (part_number - 1) * meta_request->part_size; return request_body_size; } static int s_verify_part_matches_checksum( struct aws_allocator *allocator, - struct aws_byte_buf part_body, + struct aws_byte_cursor body_cur, enum aws_s3_checksum_algorithm algorithm, - struct aws_byte_buf part_checksum) { + struct aws_byte_cursor part_checksum) { AWS_PRECONDITION(allocator); if (algorithm == AWS_SCA_NONE) { @@ -605,7 +761,6 @@ static int s_verify_part_matches_checksum( struct aws_byte_buf encoded_checksum = {0}; int return_status = AWS_OP_SUCCESS; - struct aws_byte_cursor body_cur = aws_byte_cursor_from_buf(&part_body); size_t encoded_len = 0; if (aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(algorithm), &encoded_len)) { @@ -636,7 +791,7 @@ static int s_verify_part_matches_checksum( goto on_done; } - if (!aws_byte_buf_eq(&encoded_checksum, &part_checksum)) { + if (!aws_byte_cursor_eq_byte_buf(&part_checksum, &encoded_checksum)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Failed to resume upload. Checksum for previously uploaded part does not match"); return_status = aws_raise_error(AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH); @@ -649,263 +804,511 @@ on_done: return return_status; } -/** - * Skips parts from input stream that were previously uploaded. - * Assumes input stream has num_parts_read_from_stream specifying which part stream is on - * and will read into temp buffer until it gets to skip_until_part_number (i.e. skipping does include - * that part). 
If checksum is set on the request and parts with checksums were uploaded before, checksum will be - * verified. - */ -static int s_skip_parts_from_stream( - struct aws_s3_meta_request *meta_request, - uint32_t num_parts_read_from_stream, - uint32_t skip_until_part_number) { +/* Given a request, prepare it for sending based on its description. */ +static struct aws_future_void *s_s3_auto_ranged_put_prepare_request(struct aws_s3_request *request) { - AWS_PRECONDITION(meta_request); - AWS_PRECONDITION(num_parts_read_from_stream <= skip_until_part_number); + struct aws_future_void *asyncstep_prepare_request = aws_future_void_new(request->allocator); + + /* Store data for async job */ + struct aws_s3_auto_ranged_put_prepare_request_job *request_prep = + aws_mem_calloc(request->allocator, 1, sizeof(struct aws_s3_auto_ranged_put_prepare_request_job)); + request_prep->allocator = request->allocator; + request_prep->on_complete = aws_future_void_acquire(asyncstep_prepare_request); + request_prep->request = request; + + /* Each type of request prepares an aws_http_message in its own way, which maybe require async substeps */ + switch (request->request_tag) { + case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS: + request_prep->asyncstep_prepare_message = s_s3_prepare_list_parts(request); + break; + case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: + request_prep->asyncstep_prepare_message = s_s3_prepare_create_multipart_upload(request); + break; + case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART: + request_prep->asyncstep_prepare_message = s_s3_prepare_upload_part(request); + break; + case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: + request_prep->asyncstep_prepare_message = s_s3_prepare_complete_multipart_upload(request); + break; + case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: + request_prep->asyncstep_prepare_message = s_s3_prepare_abort_multipart_upload(request); + break; + default: + AWS_FATAL_ASSERT(0); + break; + } + + 
/* When the specific type of message is ready, finish common preparation steps */ + aws_future_http_message_register_callback( + request_prep->asyncstep_prepare_message, s_s3_auto_ranged_put_prepare_request_finish, request_prep); + + return asyncstep_prepare_request; +} +/* Prepare a ListParts request. + * Currently, this is actually synchronous. */ +static struct aws_future_http_message *s_s3_prepare_list_parts(struct aws_s3_request *request) { + struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; - AWS_PRECONDITION(skip_until_part_number <= auto_ranged_put->synced_data.total_num_parts); + struct aws_http_message *message = NULL; + int message_creation_result = AWS_OP_ERR; + /* BEGIN CRITICAL SECTION */ + { + aws_s3_meta_request_lock_synced_data(meta_request); - if (num_parts_read_from_stream == skip_until_part_number) { - return AWS_OP_SUCCESS; + if (auto_ranged_put->synced_data.list_parts_continuation_token) { + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p ListParts for Multi-part Upload, with ID:%s, continues with token:%s.", + (void *)meta_request, + aws_string_c_str(auto_ranged_put->upload_id), + aws_string_c_str(auto_ranged_put->synced_data.list_parts_continuation_token)); + struct aws_byte_cursor continuation_cur = + aws_byte_cursor_from_string(auto_ranged_put->synced_data.list_parts_continuation_token); + message_creation_result = aws_s3_construct_next_paginated_request_http_message( + auto_ranged_put->synced_data.list_parts_operation, &continuation_cur, &message); + } else { + message_creation_result = aws_s3_construct_next_paginated_request_http_message( + auto_ranged_put->synced_data.list_parts_operation, NULL, &message); + } + + aws_s3_meta_request_unlock_synced_data(meta_request); } + /* END CRITICAL SECTION */ + /* ListPart will not fail to create the next message `s_construct_next_request_http_message` */ + AWS_FATAL_ASSERT(message_creation_result == 
AWS_OP_SUCCESS); + if (meta_request->checksum_config.checksum_algorithm == AWS_SCA_NONE) { + /* We don't need to worry about the pre-calculated checksum from user as for multipart upload, only way + * to calculate checksum for multipart upload is from client. */ + aws_s3_message_util_copy_headers( + meta_request->initial_request_message, + message, + g_s3_list_parts_excluded_headers, + g_s3_list_parts_excluded_headers_count, + true); + } else { + aws_s3_message_util_copy_headers( + meta_request->initial_request_message, + message, + g_s3_list_parts_with_checksum_excluded_headers, + g_s3_list_parts_with_checksum_excluded_headers_count, + true); + } + AWS_ASSERT(message); + struct aws_future_http_message *future = aws_future_http_message_new(request->allocator); + aws_future_http_message_set_result_by_move(future, &message); - struct aws_byte_buf temp_body_buf; - if (aws_byte_buf_init(&temp_body_buf, meta_request->allocator, 0)) { - return AWS_OP_ERR; + return future; +} + +/* Prepare a CreateMultipartUpload request. + * Currently, this is actually synchronous. */ +struct aws_future_http_message *s_s3_prepare_create_multipart_upload(struct aws_s3_request *request) { + struct aws_s3_meta_request *meta_request = request->meta_request; + + /* Create the message to create a new multipart upload. 
*/ + struct aws_http_message *message = aws_s3_create_multipart_upload_message_new( + meta_request->allocator, + meta_request->initial_request_message, + meta_request->checksum_config.checksum_algorithm); + + struct aws_future_http_message *future = aws_future_http_message_new(request->allocator); + if (message != NULL) { + aws_future_http_message_set_result_by_move(future, &message); + } else { + aws_future_http_message_set_error(future, aws_last_error_or_unknown()); } + return future; +} - AWS_LOGF_DEBUG( - AWS_LS_S3_META_REQUEST, - "id=%p: Skipping parts %d through %d", - (void *)meta_request, - num_parts_read_from_stream, - skip_until_part_number); +/* Prepare an UploadPart request */ +struct aws_future_http_message *s_s3_prepare_upload_part(struct aws_s3_request *request) { + struct aws_s3_meta_request *meta_request = request->meta_request; + struct aws_allocator *allocator = request->allocator; + + struct aws_future_http_message *message_future = aws_future_http_message_new(allocator); + + struct aws_s3_prepare_upload_part_job *part_prep = + aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_prepare_upload_part_job)); + part_prep->allocator = allocator; + part_prep->request = request; + part_prep->on_complete = aws_future_http_message_acquire(message_future); + + if (request->num_times_prepared == 0) { + /* Preparing request for the first time. 
+ * Next async step: read through the body stream until we've + * skipped over parts that were already uploaded (in case we're resuming + * from an upload that had been paused) */ + + /* Read the body */ + uint64_t offset = 0; + size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number, &offset); + if (request->request_body.capacity == 0) { + AWS_FATAL_ASSERT(request->ticket); + request->request_body = + aws_s3_buffer_pool_acquire_buffer(request->meta_request->client->buffer_pool, request->ticket); + request->request_body.capacity = request_body_size; + } - int return_status = AWS_OP_SUCCESS; - for (uint32_t part_index = num_parts_read_from_stream; part_index < skip_until_part_number; ++part_index) { + part_prep->asyncstep_read_part = aws_s3_meta_request_read_body(meta_request, offset, &request->request_body); + aws_future_bool_register_callback( + part_prep->asyncstep_read_part, s_s3_prepare_upload_part_on_read_done, part_prep); + } else { + /* Not the first time preparing request (e.g. retry). 
+ * We can skip over the async steps that read the body stream */ + s_s3_prepare_upload_part_finish(part_prep, AWS_ERROR_SUCCESS); + } + + return message_future; +} - size_t request_body_size = s_compute_request_body_size(meta_request, part_index + 1); +/* Completion callback for reading this part's chunk of the body stream */ +static void s_s3_prepare_upload_part_on_read_done(void *user_data) { + struct aws_s3_prepare_upload_part_job *part_prep = user_data; + struct aws_s3_request *request = part_prep->request; + struct aws_s3_meta_request *meta_request = request->meta_request; + struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; + bool has_content_length = auto_ranged_put->has_content_length != 0; - if (temp_body_buf.capacity != request_body_size) { - // reinit with correct size - aws_byte_buf_clean_up(&temp_body_buf); - if (aws_byte_buf_init(&temp_body_buf, meta_request->allocator, request_body_size)) { - return AWS_OP_ERR; - } - } else { - // reuse buffer - aws_byte_buf_reset(&temp_body_buf, false); + int error_code = aws_future_bool_get_error(part_prep->asyncstep_read_part); + + /* If reading failed, the prepare-upload-part job has failed */ + if (error_code != AWS_ERROR_SUCCESS) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Failed reading request body, error %d (%s) req len %zu req cap %zu", + (void *)meta_request, + error_code, + aws_error_str(error_code), + request->request_body.len, + request->request_body.capacity); + goto on_done; + } + /* Reading succeeded. 
*/ + bool is_body_stream_at_end = aws_future_bool_get_result(part_prep->asyncstep_read_part); + + uint64_t offset = 0; + size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number, &offset); + /* If Content-Length is defined, check that we read the expected amount */ + if (has_content_length && (request->request_body.len < request_body_size)) { + error_code = AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH; + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Request body is smaller than 'Content-Length' header said it would be", + (void *)meta_request); + goto on_done; + } + request->is_noop = request->part_number > + 1 && /* allow first part to have 0 length to support empty unknown content length objects. */ + request->request_body.len == 0; + + /* BEGIN CRITICAL SECTION */ + aws_s3_meta_request_lock_synced_data(meta_request); + + --auto_ranged_put->synced_data.num_parts_pending_read; + + auto_ranged_put->synced_data.is_body_stream_at_end = is_body_stream_at_end; + struct aws_s3_mpu_part_info *previously_uploaded_info = NULL; + if (request->was_previously_uploaded) { + aws_array_list_get_at( + &auto_ranged_put->synced_data.part_list, &previously_uploaded_info, request->part_number - 1); + AWS_ASSERT(previously_uploaded_info != NULL && previously_uploaded_info->was_previously_uploaded == true); + /* Already uploaded, set the noop to be true. */ + request->is_noop = true; + } + if (!request->is_noop) { + /* The part can finish out of order. Resize array-list to be long enough to hold this part, + * filling any intermediate slots with NULL. 
*/ + aws_array_list_ensure_capacity(&auto_ranged_put->synced_data.part_list, request->part_number); + while (aws_array_list_length(&auto_ranged_put->synced_data.part_list) < request->part_number) { + struct aws_s3_mpu_part_info *null_part = NULL; + aws_array_list_push_back(&auto_ranged_put->synced_data.part_list, &null_part); } + /* Add part to array-list */ + struct aws_s3_mpu_part_info *part = + aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_mpu_part_info)); + part->size = request->request_body.len; + aws_array_list_set_at(&auto_ranged_put->synced_data.part_list, &part, request->part_number - 1); + } + aws_s3_meta_request_unlock_synced_data(meta_request); + /* END CRITICAL SECTION */ - if (aws_s3_meta_request_read_body(meta_request, &temp_body_buf)) { - AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Failed to resume upload. Input steam cannot be read."); - return_status = AWS_OP_ERR; + if (previously_uploaded_info) { + /* Part was previously uploaded, check that it matches what we just read. + * (Yes it's weird that we keep a pointer to the part_info even after + * releasing the lock that protects part_list. But it's the resizable + * part_list that needs lock protection. 
A previously uploaded part_info is const, + * and it's on the heap, so it's safe to keep the pointer around) */ + if (request->request_body.len != previously_uploaded_info->size) { + error_code = AWS_ERROR_S3_RESUME_FAILED; + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Failed resuming upload, previous upload used different part size.", + (void *)meta_request); goto on_done; } - - // compare skipped checksum to previously uploaded checksum - if (auto_ranged_put->encoded_checksum_list[part_index].len > 0 && + /* if previously uploaded part had a checksum, compare it to what we just skipped */ + if (previously_uploaded_info->checksum_base64.len > 0 && s_verify_part_matches_checksum( meta_request->allocator, - temp_body_buf, + aws_byte_cursor_from_buf(&request->request_body), meta_request->checksum_config.checksum_algorithm, - auto_ranged_put->encoded_checksum_list[part_index])) { - return_status = AWS_OP_ERR; + aws_byte_cursor_from_buf(&previously_uploaded_info->checksum_base64))) { + error_code = aws_last_error_or_unknown(); goto on_done; } } + /* We throttle the number of parts that can be "pending read" + * (e.g. only 1 at a time if reading from async-stream). + * Now that read is complete, poke the client to see if it can give us more work. + * + * Poking now gives measurable speedup (1%) for async streaming, + * vs waiting until all the part-prep steps are complete (still need to sign, etc) */ + aws_s3_client_schedule_process_work(meta_request->client); + on_done: - aws_byte_buf_clean_up(&temp_body_buf); - return return_status; + s_s3_prepare_upload_part_finish(part_prep, error_code); } -/* Given a request, prepare it for sending based on its description. 
*/ -static int s_s3_auto_ranged_put_prepare_request( - struct aws_s3_meta_request *meta_request, - struct aws_s3_request *request) { - AWS_PRECONDITION(meta_request); +/* Finish async preparation of an UploadPart request */ +static void s_s3_prepare_upload_part_finish(struct aws_s3_prepare_upload_part_job *part_prep, int error_code) { + struct aws_s3_request *request = part_prep->request; + struct aws_s3_meta_request *meta_request = request->meta_request; + struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; + + if (error_code != AWS_ERROR_SUCCESS) { + aws_future_http_message_set_error(part_prep->on_complete, error_code); + goto on_done; + } + + struct aws_byte_buf *checksum_buf = NULL; + if (request->is_noop) { + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p UploadPart with part num %u for Multi-part Upload, with ID:%s" + "is noop due to encountering end of stream", + (void *)meta_request, + request->part_number, + aws_string_c_str(auto_ranged_put->upload_id)); + + } else { + + /* BEGIN CRITICAL SECTION */ + { + aws_s3_meta_request_lock_synced_data(meta_request); + struct aws_s3_mpu_part_info *part = NULL; + aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, request->part_number - 1); + AWS_ASSERT(part != NULL); + checksum_buf = &part->checksum_base64; + /* Clean up the buffer in case of it's initialized before and retry happens. */ + aws_byte_buf_clean_up(checksum_buf); + aws_s3_meta_request_unlock_synced_data(meta_request); + } + /* END CRITICAL SECTION */ + + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p UploadPart for Multi-part Upload, with ID:%s", + (void *)meta_request, + aws_string_c_str(auto_ranged_put->upload_id)); + } + + /* Create a new put-object message to upload a part. 
*/ + struct aws_http_message *message = aws_s3_upload_part_message_new( + meta_request->allocator, + meta_request->initial_request_message, + &request->request_body, + request->part_number, + auto_ranged_put->upload_id, + meta_request->should_compute_content_md5, + &meta_request->checksum_config, + checksum_buf); + if (message == NULL) { + aws_future_http_message_set_error(part_prep->on_complete, aws_last_error()); + goto on_done; + } + + /* Success! */ + aws_future_http_message_set_result_by_move(part_prep->on_complete, &message); + +on_done: + AWS_FATAL_ASSERT(aws_future_http_message_is_done(part_prep->on_complete)); + aws_future_bool_release(part_prep->asyncstep_read_part); + aws_future_http_message_release(part_prep->on_complete); + aws_mem_release(part_prep->allocator, part_prep); +} +/* Allow user to review what we've uploaded, and fail the meta-request if they don't approve. */ +static int s_s3_review_multipart_upload(struct aws_s3_request *request) { + struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; - AWS_PRECONDITION(auto_ranged_put); - struct aws_http_message *message = NULL; + /* If user registered no callback, then success! 
*/ + if (meta_request->upload_review_callback == NULL) { + return AWS_OP_SUCCESS; + } - switch (request->request_tag) { - case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS: { + /* Prepare review info */ + struct aws_s3_upload_review review = { + .checksum_algorithm = meta_request->checksum_config.checksum_algorithm, + }; - int message_creation_result = AWS_OP_ERR; - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); + /* BEGIN CRITICAL SECTION */ + aws_s3_meta_request_lock_synced_data(meta_request); - if (auto_ranged_put->synced_data.list_parts_continuation_token) { - AWS_LOGF_DEBUG( - AWS_LS_S3_META_REQUEST, - "id=%p ListParts for Multi-part Upload, with ID:%s, continues with token:%s.", - (void *)meta_request, - aws_string_c_str(auto_ranged_put->upload_id), - aws_string_c_str(auto_ranged_put->synced_data.list_parts_continuation_token)); - struct aws_byte_cursor continuation_cur = - aws_byte_cursor_from_string(auto_ranged_put->synced_data.list_parts_continuation_token); - message_creation_result = aws_s3_construct_next_paginated_request_http_message( - auto_ranged_put->synced_data.list_parts_operation, &continuation_cur, &message); - } else { - message_creation_result = aws_s3_construct_next_paginated_request_http_message( - auto_ranged_put->synced_data.list_parts_operation, NULL, &message); - } + review.part_count = aws_array_list_length(&auto_ranged_put->synced_data.part_list); - aws_s3_meta_request_unlock_synced_data(meta_request); - } - /* END CRITICAL SECTION */ + if (review.part_count > 0) { + review.part_array = + aws_mem_calloc(meta_request->allocator, review.part_count, sizeof(struct aws_s3_upload_part_review)); - if (message_creation_result) { - goto message_create_failed; - } - if (meta_request->checksum_config.checksum_algorithm == AWS_SCA_NONE) { - /* We don't need to worry about the pre-calculated checksum from user as for multipart upload, only way - * to calculate checksum for multipart upload is from client. 
*/ - aws_s3_message_util_copy_headers( - meta_request->initial_request_message, - message, - g_s3_list_parts_excluded_headers, - g_s3_list_parts_excluded_headers_count, - true); - } else { - aws_s3_message_util_copy_headers( - meta_request->initial_request_message, - message, - g_s3_list_parts_with_checksum_excluded_headers, - g_s3_list_parts_with_checksum_excluded_headers_count, - true); - } + for (size_t part_index = 0; part_index < review.part_count; ++part_index) { + struct aws_s3_mpu_part_info *part; + aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); - break; + struct aws_s3_upload_part_review *part_review = &review.part_array[part_index]; + part_review->size = part->size; + part_review->checksum = aws_byte_cursor_from_buf(&part->checksum_base64); } - case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: { + } - /* Create the message to create a new multipart upload. */ - message = aws_s3_create_multipart_upload_message_new( - meta_request->allocator, - meta_request->initial_request_message, - meta_request->checksum_config.checksum_algorithm); + aws_s3_meta_request_unlock_synced_data(meta_request); + /* END CRITICAL SECTION */ - break; - } - case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART: { + /* Invoke callback */ + int error_code = AWS_ERROR_SUCCESS; + if (meta_request->upload_review_callback(meta_request, &review, meta_request->user_data) != AWS_OP_SUCCESS) { + error_code = aws_last_error_or_unknown(); + } - size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number); + /* Clean up review info */ + aws_mem_release(meta_request->allocator, review.part_array); - if (request->num_times_prepared == 0) { - if (s_skip_parts_from_stream( - meta_request, - auto_ranged_put->prepare_data.num_parts_read_from_stream, - request->part_number - 1)) { - goto message_create_failed; - } - auto_ranged_put->prepare_data.num_parts_read_from_stream = request->part_number - 1; + if (error_code == 
AWS_ERROR_SUCCESS) { + return AWS_OP_SUCCESS; + } else { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Upload review callback raised error %d (%s)", + (void *)meta_request, + error_code, + aws_error_str(error_code)); + return aws_raise_error(error_code); + } +} - aws_byte_buf_init(&request->request_body, meta_request->allocator, request_body_size); +/* Prepare a CompleteMultipartUpload request. */ +static struct aws_future_http_message *s_s3_prepare_complete_multipart_upload(struct aws_s3_request *request) { + struct aws_s3_meta_request *meta_request = request->meta_request; + struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; + struct aws_allocator *allocator = request->allocator; - if (aws_s3_meta_request_read_body(meta_request, &request->request_body)) { - goto message_create_failed; - } - ++auto_ranged_put->prepare_data.num_parts_read_from_stream; - } - /* Create a new put-object message to upload a part. */ - message = aws_s3_upload_part_message_new( - meta_request->allocator, - meta_request->initial_request_message, - &request->request_body, - request->part_number, - auto_ranged_put->upload_id, - meta_request->should_compute_content_md5, - &meta_request->checksum_config, - &auto_ranged_put->encoded_checksum_list[request->part_number - 1]); - break; + struct aws_future_http_message *message_future = aws_future_http_message_new(allocator); + + AWS_FATAL_ASSERT(auto_ranged_put->upload_id); + + if (request->num_times_prepared == 0) { + /* Invoke upload_review_callback, and fail meta-request if user raises an error */ + if (s_s3_review_multipart_upload(request) != AWS_OP_SUCCESS) { + aws_future_http_message_set_error(message_future, aws_last_error()); + goto on_done; } - case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: { - if (request->num_times_prepared == 0) { + /* Allocate request body */ + aws_byte_buf_init( + &request->request_body, meta_request->allocator, s_complete_multipart_upload_init_body_size_bytes); - /* 
Corner case of last part being previously uploaded during resume. - * Read it from input stream and potentially verify checksum */ - if (s_skip_parts_from_stream( - meta_request, - auto_ranged_put->prepare_data.num_parts_read_from_stream, - auto_ranged_put->synced_data.total_num_parts)) { - goto message_create_failed; - } - auto_ranged_put->prepare_data.num_parts_read_from_stream = auto_ranged_put->synced_data.total_num_parts; + } else { + /* This is a retry, reset request body */ + aws_byte_buf_reset(&request->request_body, false); + } - aws_byte_buf_init( - &request->request_body, meta_request->allocator, s_complete_multipart_upload_init_body_size_bytes); - } else { - aws_byte_buf_reset(&request->request_body, false); - } + /* BEGIN CRITICAL SECTION */ + aws_s3_meta_request_lock_synced_data(meta_request); - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); + /* Build the message to complete our multipart upload, which includes a payload describing all of + * our completed parts. */ + struct aws_http_message *message = aws_s3_complete_multipart_message_new( + meta_request->allocator, + meta_request->initial_request_message, + &request->request_body, + auto_ranged_put->upload_id, + &auto_ranged_put->synced_data.part_list, + meta_request->checksum_config.checksum_algorithm); - AWS_FATAL_ASSERT(auto_ranged_put->upload_id); - AWS_ASSERT(request->request_body.capacity > 0); - aws_byte_buf_reset(&request->request_body, false); - - /* Build the message to complete our multipart upload, which includes a payload describing all of - * our completed parts. 
*/ - message = aws_s3_complete_multipart_message_new( - meta_request->allocator, - meta_request->initial_request_message, - &request->request_body, - auto_ranged_put->upload_id, - &auto_ranged_put->synced_data.etag_list, - auto_ranged_put->encoded_checksum_list, - meta_request->checksum_config.checksum_algorithm); + aws_s3_meta_request_unlock_synced_data(meta_request); + /* END CRITICAL SECTION */ - aws_s3_meta_request_unlock_synced_data(meta_request); - } - /* END CRITICAL SECTION */ + if (message == NULL) { + aws_future_http_message_set_error(message_future, aws_last_error()); + goto on_done; + } - break; - } - case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: { - AWS_FATAL_ASSERT(auto_ranged_put->upload_id); - AWS_LOGF_DEBUG( - AWS_LS_S3_META_REQUEST, - "id=%p Abort multipart upload request for upload id %s.", - (void *)meta_request, - aws_string_c_str(auto_ranged_put->upload_id)); + /* Success! */ + aws_future_http_message_set_result_by_move(message_future, &message); - if (request->num_times_prepared == 0) { - aws_byte_buf_init( - &request->request_body, meta_request->allocator, s_abort_multipart_upload_init_body_size_bytes); - } else { - aws_byte_buf_reset(&request->request_body, false); - } +on_done: + AWS_FATAL_ASSERT(aws_future_http_message_is_done(message_future)); + return message_future; +} + +/* Prepare an AbortMultipartUpload request. + * Currently, this is actually synchronous. 
*/ +struct aws_future_http_message *s_s3_prepare_abort_multipart_upload(struct aws_s3_request *request) { + struct aws_s3_meta_request *meta_request = request->meta_request; + struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; - /* Build the message to abort our multipart upload */ - message = aws_s3_abort_multipart_upload_message_new( - meta_request->allocator, meta_request->initial_request_message, auto_ranged_put->upload_id); + AWS_FATAL_ASSERT(auto_ranged_put->upload_id); + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p Abort multipart upload request for upload id %s.", + (void *)meta_request, + aws_string_c_str(auto_ranged_put->upload_id)); - break; - } + if (request->num_times_prepared == 0) { + aws_byte_buf_init( + &request->request_body, meta_request->allocator, s_abort_multipart_upload_init_body_size_bytes); + } else { + aws_byte_buf_reset(&request->request_body, false); } - if (message == NULL) { + /* Build the message to abort our multipart upload */ + struct aws_http_message *message = aws_s3_abort_multipart_upload_message_new( + meta_request->allocator, meta_request->initial_request_message, auto_ranged_put->upload_id); + + struct aws_future_http_message *future = aws_future_http_message_new(request->allocator); + if (message != NULL) { + aws_future_http_message_set_result_by_move(future, &message); + } else { + aws_future_http_message_set_error(future, aws_last_error_or_unknown()); + } + return future; +} + +/* Finish the vtable->prepare_request() job */ +static void s_s3_auto_ranged_put_prepare_request_finish(void *user_data) { + struct aws_s3_auto_ranged_put_prepare_request_job *request_prep = user_data; + struct aws_s3_request *request = request_prep->request; + struct aws_s3_meta_request *meta_request = request->meta_request; + + /* Did we successfully create the type-specific HTTP message? 
*/ + int error_code = aws_future_http_message_get_error(request_prep->asyncstep_prepare_message); + if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not allocate message for request with tag %d for auto-ranged-put meta request.", (void *)meta_request, request->request_tag); - goto message_create_failed; + + goto on_done; } + /* Success! Apply aws_http_message to aws_s3_request */ + struct aws_http_message *message = + aws_future_http_message_get_result_by_move(request_prep->asyncstep_prepare_message); aws_s3_request_setup_send_data(request, message); - aws_http_message_release(message); AWS_LOGF_DEBUG( @@ -915,35 +1318,16 @@ static int s_s3_auto_ranged_put_prepare_request( (void *)request, request->part_number); - return AWS_OP_SUCCESS; - -message_create_failed: - - return AWS_OP_ERR; -} - -/* Invoked before retry */ -static void s_s3_auto_ranged_put_send_request_finish( - struct aws_s3_connection *connection, - struct aws_http_stream *stream, - int error_code) { - - struct aws_s3_request *request = connection->request; - AWS_PRECONDITION(request); - - /* Request tag is different from different type of meta requests */ - switch (request->request_tag) { - - case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: { - /* For complete multipart upload, the server may return async error. 
*/ - aws_s3_meta_request_send_request_finish_handle_async_error(connection, stream, error_code); - break; - } - - default: - aws_s3_meta_request_send_request_finish_default(connection, stream, error_code); - break; +on_done: + if (error_code == AWS_ERROR_SUCCESS) { + aws_future_void_set_result(request_prep->on_complete); + } else { + aws_future_void_set_error(request_prep->on_complete, error_code); } + + aws_future_http_message_release(request_prep->asyncstep_prepare_message); + aws_future_void_release(request_prep->on_complete); + aws_mem_release(request_prep->allocator, request_prep); } /* Invoked when no-retry will happen */ @@ -957,84 +1341,88 @@ static void s_s3_auto_ranged_put_request_finished( AWS_PRECONDITION(request); struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; + aws_s3_meta_request_lock_synced_data(meta_request); switch (request->request_tag) { case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS: { - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); - bool has_more_results = false; + bool has_more_results = false; - if (error_code == AWS_ERROR_SUCCESS) { + if (error_code == AWS_ERROR_SUCCESS) { - struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body); - /* Clear the token before */ - aws_string_destroy(auto_ranged_put->synced_data.list_parts_continuation_token); - auto_ranged_put->synced_data.list_parts_continuation_token = NULL; - if (aws_s3_paginated_operation_on_response( - auto_ranged_put->synced_data.list_parts_operation, - &body_cursor, - &auto_ranged_put->synced_data.list_parts_continuation_token, - &has_more_results)) { - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, "id=%p Failed to parse list parts response.", (void *)meta_request); - error_code = AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED; - } else if (!has_more_results) { - for (size_t etag_index = 0; - etag_index < aws_array_list_length(&auto_ranged_put->synced_data.etag_list); - etag_index++) { - 
struct aws_string *etag = NULL; - aws_array_list_get_at(&auto_ranged_put->synced_data.etag_list, &etag, etag_index); - if (etag != NULL) { - /* Update the number of parts sent/completed previously */ - ++auto_ranged_put->synced_data.num_parts_sent; - ++auto_ranged_put->synced_data.num_parts_completed; - } + struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body); + /* Clear the token before */ + aws_string_destroy(auto_ranged_put->synced_data.list_parts_continuation_token); + auto_ranged_put->synced_data.list_parts_continuation_token = NULL; + if (aws_s3_paginated_operation_on_response( + auto_ranged_put->synced_data.list_parts_operation, + &body_cursor, + &auto_ranged_put->synced_data.list_parts_continuation_token, + &has_more_results)) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, "id=%p Failed to parse list parts response.", (void *)meta_request); + error_code = AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED; + } else if (!has_more_results) { + uint64_t bytes_previously_uploaded = 0; + int parts_previously_uploaded = 0; + + for (size_t part_index = 0; + part_index < aws_array_list_length(&auto_ranged_put->synced_data.part_list); + part_index++) { + struct aws_s3_mpu_part_info *part = NULL; + aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); + if (part != NULL) { + /* Update the number of parts sent/completed previously */ + ++parts_previously_uploaded; + bytes_previously_uploaded += part->size; } + } - AWS_LOGF_DEBUG( - AWS_LS_S3_META_REQUEST, - "id=%p: Resuming PutObject. %d out of %d parts have completed during previous request.", - (void *)meta_request, - auto_ranged_put->synced_data.num_parts_completed, - auto_ranged_put->synced_data.total_num_parts); + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p: Resuming PutObject. 
%d out of %d parts have completed during previous request.", + (void *)meta_request, + parts_previously_uploaded, + auto_ranged_put->total_num_parts_from_content_length); + + /* Deliver an initial progress_callback to report all previously uploaded parts. */ + if (meta_request->progress_callback != NULL && bytes_previously_uploaded > 0) { + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; + event.u.progress.info.bytes_transferred = bytes_previously_uploaded; + event.u.progress.info.content_length = auto_ranged_put->content_length; + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } } + } - if (has_more_results) { - /* If list parts has more result, make sure list parts continues */ - auto_ranged_put->synced_data.list_parts_state.continues = true; - auto_ranged_put->synced_data.list_parts_state.completed = false; - } else { - /* No more result, complete the list parts */ - auto_ranged_put->synced_data.list_parts_state.continues = false; - auto_ranged_put->synced_data.list_parts_state.completed = true; - } - auto_ranged_put->synced_data.list_parts_error_code = error_code; + if (has_more_results) { + /* If list parts has more result, make sure list parts continues */ + auto_ranged_put->synced_data.list_parts_state.continues = true; + auto_ranged_put->synced_data.list_parts_state.completed = false; + } else { + /* No more result, complete the list parts */ + auto_ranged_put->synced_data.list_parts_state.continues = false; + auto_ranged_put->synced_data.list_parts_state.completed = true; + } + auto_ranged_put->synced_data.list_parts_error_code = error_code; - if (error_code != AWS_ERROR_SUCCESS) { - if (request->send_data.response_status == AWS_HTTP_STATUS_CODE_404_NOT_FOUND && - auto_ranged_put->resume_token->num_parts_completed == - auto_ranged_put->resume_token->total_num_parts) { - AWS_LOGF_DEBUG( - AWS_LS_S3_META_REQUEST, - "id=%p: Resuming PutObject ended early, since there is nothing to resume" - 
"(request finished prior to being paused?)", - (void *)meta_request); + if (error_code != AWS_ERROR_SUCCESS) { + if (request->send_data.response_status == AWS_HTTP_STATUS_CODE_404_NOT_FOUND && + auto_ranged_put->resume_token->num_parts_completed == + auto_ranged_put->resume_token->total_num_parts) { + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p: Resuming PutObject ended early, since there is nothing to resume" + "(request finished prior to being paused?)", + (void *)meta_request); - aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS); - } else { - aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); - } + aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK); + } else { + aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } - - aws_s3_meta_request_unlock_synced_data(meta_request); } - /* END CRITICAL SECTION */ - break; - } + } break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: { struct aws_http_headers *needed_response_headers = NULL; @@ -1055,13 +1443,14 @@ static void s_s3_auto_ranged_put_request_finished( } } - struct aws_byte_cursor buffer_byte_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body); + struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); /* Find the upload id for this multipart upload. 
*/ - struct aws_string *upload_id = - aws_xml_get_top_level_tag(meta_request->allocator, &s_upload_id, &buffer_byte_cursor); + struct aws_byte_cursor upload_id = {0}; + const char *xml_path[] = {"InitiateMultipartUploadResult", "UploadId", NULL}; + aws_xml_get_body_at_path(meta_request->allocator, xml_doc, xml_path, &upload_id); - if (upload_id == NULL) { + if (upload_id.len == 0) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find upload-id in create-multipart-upload response", @@ -1071,98 +1460,100 @@ static void s_s3_auto_ranged_put_request_finished( error_code = AWS_ERROR_S3_MISSING_UPLOAD_ID; } else { /* Store the multipart upload id. */ - auto_ranged_put->upload_id = upload_id; + auto_ranged_put->upload_id = aws_string_new_from_cursor(meta_request->allocator, &upload_id); } } - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); - - AWS_ASSERT(auto_ranged_put->synced_data.needed_response_headers == NULL); - auto_ranged_put->synced_data.needed_response_headers = needed_response_headers; + AWS_ASSERT(auto_ranged_put->synced_data.needed_response_headers == NULL); + auto_ranged_put->synced_data.needed_response_headers = needed_response_headers; - auto_ranged_put->synced_data.create_multipart_upload_completed = true; - auto_ranged_put->synced_data.list_parts_error_code = error_code; + auto_ranged_put->synced_data.create_multipart_upload_completed = true; + auto_ranged_put->synced_data.list_parts_error_code = error_code; - if (error_code != AWS_ERROR_SUCCESS) { - aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); - } - - aws_s3_meta_request_unlock_synced_data(meta_request); + if (error_code != AWS_ERROR_SUCCESS) { + aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } - /* END CRITICAL SECTION */ - break; - } + } break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART: { size_t part_number = request->part_number; AWS_FATAL_ASSERT(part_number > 0); size_t part_index = part_number - 
1; struct aws_string *etag = NULL; + bool request_is_noop = request->is_noop != 0; - if (error_code == AWS_ERROR_SUCCESS) { - /* Find the ETag header if it exists and cache it. */ - struct aws_byte_cursor etag_within_quotes; + if (!request_is_noop) { + if (error_code == AWS_ERROR_SUCCESS) { + /* Find the ETag header if it exists and cache it. */ + struct aws_byte_cursor etag_within_quotes; - AWS_ASSERT(request->send_data.response_headers); + AWS_ASSERT(request->send_data.response_headers); - if (aws_http_headers_get( - request->send_data.response_headers, g_etag_header_name, &etag_within_quotes) != - AWS_OP_SUCCESS) { - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, - "id=%p Could not find ETag header for request %p", - (void *)meta_request, - (void *)request); + if (aws_http_headers_get( + request->send_data.response_headers, g_etag_header_name, &etag_within_quotes) != + AWS_OP_SUCCESS) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Could not find ETag header for request %p", + (void *)meta_request, + (void *)request); - error_code = AWS_ERROR_S3_MISSING_ETAG; - } else { - /* The ETag value arrives in quotes, but we don't want it in quotes when we send it back up - * later, so just get rid of the quotes now. */ - etag = aws_strip_quotes(meta_request->allocator, etag_within_quotes); + error_code = AWS_ERROR_S3_MISSING_ETAG; + } else { + /* The ETag value arrives in quotes, but we don't want it in quotes when we send it back up + * later, so just get rid of the quotes now. 
*/ + etag = aws_strip_quotes(meta_request->allocator, etag_within_quotes); + } } } - if (error_code == AWS_ERROR_SUCCESS && meta_request->progress_callback != NULL) { - struct aws_s3_meta_request_progress progress = { - .bytes_transferred = meta_request->part_size, - .content_length = auto_ranged_put->content_length, - }; - meta_request->progress_callback(meta_request, &progress, meta_request->user_data); - } - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); - ++auto_ranged_put->synced_data.num_parts_completed; + ++auto_ranged_put->synced_data.num_parts_completed; + + if (request_is_noop) { + ++auto_ranged_put->synced_data.num_parts_noop; + } + if (auto_ranged_put->has_content_length) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: %d out of %d parts have completed.", (void *)meta_request, auto_ranged_put->synced_data.num_parts_completed, - auto_ranged_put->synced_data.total_num_parts); + auto_ranged_put->total_num_parts_from_content_length); + } else { + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p: %d parts have completed.", + (void *)meta_request, + auto_ranged_put->synced_data.num_parts_completed); + } + if (!request_is_noop) { if (error_code == AWS_ERROR_SUCCESS) { AWS_ASSERT(etag != NULL); ++auto_ranged_put->synced_data.num_parts_successful; - /* ETags need to be associated with their part number, so we keep the etag indices consistent with - * part numbers. This means we may have to add padding to the list in the case that parts finish out - * of order. 
*/ - aws_array_list_set_at(&auto_ranged_put->synced_data.etag_list, &etag, part_index); + /* Send progress_callback for delivery on io_event_loop thread */ + if (meta_request->progress_callback != NULL) { + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; + event.u.progress.info.bytes_transferred = request->request_body.len; + event.u.progress.info.content_length = auto_ranged_put->content_length; + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); + } + + /* Store part's ETag */ + struct aws_s3_mpu_part_info *part = NULL; + aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); + AWS_ASSERT(part != NULL); + AWS_ASSERT(part->etag == NULL); + part->etag = etag; } else { ++auto_ranged_put->synced_data.num_parts_failed; aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } - - aws_s3_meta_request_unlock_synced_data(meta_request); } - /* END CRITICAL SECTION */ - - break; - } + } break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: { if (error_code == AWS_ERROR_SUCCESS && meta_request->headers_callback != NULL) { @@ -1174,17 +1565,9 @@ static void s_s3_auto_ranged_put_request_finished( /* Copy over any response headers that we've previously determined are needed for this final * response. 
*/ + copy_http_headers(auto_ranged_put->synced_data.needed_response_headers, final_response_headers); - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); - copy_http_headers(auto_ranged_put->synced_data.needed_response_headers, final_response_headers); - aws_s3_meta_request_unlock_synced_data(meta_request); - } - /* END CRITICAL SECTION */ - - struct aws_byte_cursor response_body_cursor = - aws_byte_cursor_from_buf(&request->send_data.response_body); + struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); /** * TODO: The body of the response can be ERROR, check Error specified in body part from @@ -1194,24 +1577,24 @@ static void s_s3_auto_ranged_put_request_finished( */ /* Grab the ETag for the entire object, and set it as a header. */ - struct aws_string *etag_header_value = - aws_xml_get_top_level_tag(meta_request->allocator, &g_etag_header_name, &response_body_cursor); + struct aws_byte_cursor etag_header_value = {0}; + const char *xml_path[] = {"CompleteMultipartUploadResult", "ETag", NULL}; + aws_xml_get_body_at_path(meta_request->allocator, xml_doc, xml_path, &etag_header_value); - if (etag_header_value != NULL) { - struct aws_byte_buf etag_header_value_byte_buf; - AWS_ZERO_STRUCT(etag_header_value_byte_buf); - - replace_quote_entities(meta_request->allocator, etag_header_value, &etag_header_value_byte_buf); + if (etag_header_value.len > 0) { + struct aws_byte_buf etag_header_value_byte_buf = + aws_replace_quote_entities(meta_request->allocator, etag_header_value); aws_http_headers_set( final_response_headers, g_etag_header_name, aws_byte_cursor_from_buf(&etag_header_value_byte_buf)); - aws_string_destroy(etag_header_value); aws_byte_buf_clean_up(&etag_header_value_byte_buf); } + /* Invoke the callback without lock */ + aws_s3_meta_request_unlock_synced_data(meta_request); /* Notify the user of the headers. 
*/ if (meta_request->headers_callback( meta_request, @@ -1222,37 +1605,28 @@ static void s_s3_auto_ranged_put_request_finished( error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; + /* Grab the lock again after the callback */ + aws_s3_meta_request_lock_synced_data(meta_request); aws_http_headers_release(final_response_headers); } - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); - auto_ranged_put->synced_data.complete_multipart_upload_completed = true; - auto_ranged_put->synced_data.complete_multipart_upload_error_code = error_code; + auto_ranged_put->synced_data.complete_multipart_upload_completed = true; + auto_ranged_put->synced_data.complete_multipart_upload_error_code = error_code; - if (error_code != AWS_ERROR_SUCCESS) { - aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); - } - aws_s3_meta_request_unlock_synced_data(meta_request); + if (error_code != AWS_ERROR_SUCCESS) { + aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } - /* END CRITICAL SECTION */ - - break; - } + } break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: { - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); - auto_ranged_put->synced_data.abort_multipart_upload_error_code = error_code; - auto_ranged_put->synced_data.abort_multipart_upload_completed = true; - aws_s3_meta_request_unlock_synced_data(meta_request); - } - /* END CRITICAL SECTION */ - break; - } + auto_ranged_put->synced_data.abort_multipart_upload_error_code = error_code; + auto_ranged_put->synced_data.abort_multipart_upload_completed = true; + + } break; } + + aws_s3_request_finish_up_metrics_synced(request, meta_request); + aws_s3_meta_request_unlock_synced_data(meta_request); } static int s_s3_auto_ranged_put_pause( @@ -1261,16 +1635,22 @@ static int s_s3_auto_ranged_put_pause( *out_resume_token = NULL; + struct aws_s3_auto_ranged_put *auto_ranged_put = 
meta_request->impl; + if (!auto_ranged_put->has_content_length) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, "id=%p: Failed to pause request with unknown content length", (void *)meta_request); + return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); + } + /* lock */ aws_s3_meta_request_lock_synced_data(meta_request); - struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Pausing request with %u out of %u parts have completed.", (void *)meta_request, auto_ranged_put->synced_data.num_parts_completed, - auto_ranged_put->synced_data.total_num_parts); + auto_ranged_put->total_num_parts_from_content_length); /* upload can be in one of several states: * - not started, i.e. we didn't even call crete mpu yet - return success, @@ -1288,7 +1668,7 @@ static int s_s3_auto_ranged_put_pause( (*out_resume_token)->multipart_upload_id = aws_string_clone_or_reuse(meta_request->allocator, auto_ranged_put->upload_id); (*out_resume_token)->part_size = meta_request->part_size; - (*out_resume_token)->total_num_parts = auto_ranged_put->synced_data.total_num_parts; + (*out_resume_token)->total_num_parts = auto_ranged_put->total_num_parts_from_content_length; (*out_resume_token)->num_parts_completed = auto_ranged_put->synced_data.num_parts_completed; } @@ -1298,6 +1678,8 @@ static int s_s3_auto_ranged_put_pause( */ aws_s3_meta_request_set_fail_synced(meta_request, NULL, AWS_ERROR_S3_PAUSED); + aws_s3_meta_request_cancel_cancellable_requests_synced(meta_request, AWS_ERROR_S3_PAUSED); + /* unlock */ aws_s3_meta_request_unlock_synced_data(meta_request); diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_buffer_pool.c b/contrib/restricted/aws/aws-c-s3/source/s3_buffer_pool.c new file mode 100644 index 00000000000..fe62fe04106 --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/source/s3_buffer_pool.c @@ -0,0 +1,440 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0. + */ + +#include <aws/s3/private/s3_buffer_pool.h> + +#include <aws/common/array_list.h> +#include <aws/common/mutex.h> +#include <aws/s3/private/s3_util.h> + +/* + * S3 Buffer Pool. + * Fairly trivial implementation of "arena" style allocator. + * Note: current implementation is not optimized and instead tries to be + * as straightforward as possible. Given that pool manages a small number + * of big allocations, performance impact is not that bad, but something we need + * to look into on the next iteration. + * + * Basic approach is to divide acquires into primary and secondary. + * User provides chunk size during construction. Acquires below 4 * chunks_size + * are done from primary and the rest are from secondary. + * + * Primary storage consists of blocks that are each s_chunks_per_block * + * chunk_size in size. blocks are created on demand as needed. + * Acquire operation from primary basically works by determining how many chunks + * are needed and then finding available space in existing blocks or creating a + * new block. Acquire will always take over the whole chunk, so some space is + * likely wasted. + * Ex. say chunk_size is 8mb and s_chunks_per_block is 16, which makes block size 128mb. + * acquires up to 32mb will be done from primary. So 1 block can hold 4 buffers + * of 32mb (4 chunks) or 16 buffers of 8mb (1 chunk). If requested buffer size + * is 12mb, 2 chunks are used for acquire and 4mb will be wasted. + * Secondary storage delegates directly to system allocator. + */ + +struct aws_s3_buffer_pool_ticket { + size_t size; + uint8_t *ptr; + size_t chunks_used; +}; + +/* Default size for blocks array. Note: this is just for meta info, blocks + * themselves are not preallocated. */ +static size_t s_block_list_initial_capacity = 5; + +/* Amount of mem reserved for use outside of buffer pool. + * This is an optimistic upper bound on mem used as we dont track it. + * Covers both usage outside of pool, i.e. 
all allocations done as part of s3 + * client as well as any allocations overruns due to memory waste in the pool. */ +static const size_t s_buffer_pool_reserved_mem = MB_TO_BYTES(128); + +/* + * How many chunks make up a block in primary storage. + */ +static const size_t s_chunks_per_block = 16; + +/* + * Max size of chunks in primary. + * Effectively if client part size is above the following number, primary + * storage along with buffer reuse is disabled and all buffers are allocated + * directly using allocator. + */ +static const size_t s_max_chunk_size_for_buffer_reuse = MB_TO_BYTES(64); + +struct aws_s3_buffer_pool { + struct aws_allocator *base_allocator; + struct aws_mutex mutex; + + size_t block_size; + size_t chunk_size; + /* size at which allocations should go to secondary */ + size_t primary_size_cutoff; + + size_t mem_limit; + + bool has_reservation_hold; + + size_t primary_allocated; + size_t primary_reserved; + size_t primary_used; + + size_t secondary_reserved; + size_t secondary_used; + + struct aws_array_list blocks; +}; + +struct s3_buffer_pool_block { + size_t block_size; + uint8_t *block_ptr; + uint16_t alloc_bit_mask; +}; + +/* + * Sets n bits at position starting with LSB. + * Note: n must be at most 8, but in practice will always be at most 4. + * position + n should at most be 16 + */ +static inline uint16_t s_set_bits(uint16_t num, size_t position, size_t n) { + AWS_PRECONDITION(n <= 8); + AWS_PRECONDITION(position + n <= 16); + uint16_t mask = ((uint16_t)0x00FF) >> (8 - n); + return num | (mask << position); +} + +/* + * Clears n bits at position starting with LSB. + * Note: n must be at most 8, but in practice will always be at most 4. 
+ * position + n should at most be 16 + */ +static inline uint16_t s_clear_bits(uint16_t num, size_t position, size_t n) { + AWS_PRECONDITION(n <= 8); + AWS_PRECONDITION(position + n <= 16); + uint16_t mask = ((uint16_t)0x00FF) >> (8 - n); + return num & ~(mask << position); +} + +/* + * Checks whether n bits are set at position starting with LSB. + * Note: n must be at most 8, but in practice will always be at most 4. + * position + n should at most be 16 + */ +static inline bool s_check_bits(uint16_t num, size_t position, size_t n) { + AWS_PRECONDITION(n <= 8); + AWS_PRECONDITION(position + n <= 16); + uint16_t mask = ((uint16_t)0x00FF) >> (8 - n); + return (num >> position) & mask; +} + +struct aws_s3_buffer_pool *aws_s3_buffer_pool_new( + struct aws_allocator *allocator, + size_t chunk_size, + size_t mem_limit) { + + if (mem_limit < GB_TO_BYTES(1)) { + AWS_LOGF_ERROR( + AWS_LS_S3_CLIENT, + "Failed to initialize buffer pool. " + "Minimum supported value for Memory Limit is 1GB."); + aws_raise_error(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG); + return NULL; + } + + if (chunk_size < (1024) || chunk_size % (4 * 1024) != 0) { + AWS_LOGF_WARN( + AWS_LS_S3_CLIENT, + "Part size specified on the client can lead to suboptimal performance. " + "Consider specifying size in multiples of 4KiB. Ideal part size for most transfers is " + "1MiB multiple between 8MiB and 16MiB. Note: the client will automatically scale part size " + "if its not sufficient to transfer data within the maximum number of parts"); + } + + size_t adjusted_mem_lim = mem_limit - s_buffer_pool_reserved_mem; + + /* + * TODO: There is several things we can consider tweaking here: + * - if chunk size is a weird number of bytes, force it to the closest page size? + * - grow chunk size max based on overall mem lim (ex. for 4gb it might be + * 64mb, but for 8gb it can be 128mb) + * - align chunk size to better fill available mem? 
some chunk sizes can + * result in memory being wasted because overall limit does not divide + * nicely into chunks + */ + if (chunk_size > s_max_chunk_size_for_buffer_reuse || chunk_size * s_chunks_per_block > adjusted_mem_lim) { + AWS_LOGF_WARN( + AWS_LS_S3_CLIENT, + "Part size specified on the client is too large for automatic buffer reuse. " + "Consider specifying a smaller part size to improve performance and memory utilization"); + chunk_size = 0; + } + + struct aws_s3_buffer_pool *buffer_pool = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_buffer_pool)); + + AWS_FATAL_ASSERT(buffer_pool != NULL); + + buffer_pool->base_allocator = allocator; + buffer_pool->chunk_size = chunk_size; + buffer_pool->block_size = s_chunks_per_block * chunk_size; + /* Somewhat arbitrary number. + * Tries to balance between how many allocations use buffer and buffer space + * being wasted. */ + buffer_pool->primary_size_cutoff = chunk_size * 4; + buffer_pool->mem_limit = adjusted_mem_lim; + int mutex_error = aws_mutex_init(&buffer_pool->mutex); + AWS_FATAL_ASSERT(mutex_error == AWS_OP_SUCCESS); + + aws_array_list_init_dynamic( + &buffer_pool->blocks, allocator, s_block_list_initial_capacity, sizeof(struct s3_buffer_pool_block)); + + return buffer_pool; +} + +void aws_s3_buffer_pool_destroy(struct aws_s3_buffer_pool *buffer_pool) { + if (buffer_pool == NULL) { + return; + } + + for (size_t i = 0; i < aws_array_list_length(&buffer_pool->blocks); ++i) { + struct s3_buffer_pool_block *block; + aws_array_list_get_at_ptr(&buffer_pool->blocks, (void **)&block, i); + + AWS_FATAL_ASSERT(block->alloc_bit_mask == 0 && "Allocator still has outstanding blocks"); + aws_mem_release(buffer_pool->base_allocator, block->block_ptr); + } + + aws_array_list_clean_up(&buffer_pool->blocks); + + aws_mutex_clean_up(&buffer_pool->mutex); + struct aws_allocator *base = buffer_pool->base_allocator; + aws_mem_release(base, buffer_pool); +} + +void s_buffer_pool_trim_synced(struct aws_s3_buffer_pool 
*buffer_pool) { + for (size_t i = 0; i < aws_array_list_length(&buffer_pool->blocks);) { + struct s3_buffer_pool_block *block; + aws_array_list_get_at_ptr(&buffer_pool->blocks, (void **)&block, i); + + if (block->alloc_bit_mask == 0) { + aws_mem_release(buffer_pool->base_allocator, block->block_ptr); + aws_array_list_erase(&buffer_pool->blocks, i); + /* do not increment since we just released element */ + } else { + ++i; + } + } +} + +void aws_s3_buffer_pool_trim(struct aws_s3_buffer_pool *buffer_pool) { + aws_mutex_lock(&buffer_pool->mutex); + s_buffer_pool_trim_synced(buffer_pool); + aws_mutex_unlock(&buffer_pool->mutex); +} + +struct aws_s3_buffer_pool_ticket *aws_s3_buffer_pool_reserve(struct aws_s3_buffer_pool *buffer_pool, size_t size) { + AWS_PRECONDITION(buffer_pool); + + if (buffer_pool->has_reservation_hold) { + return NULL; + } + + AWS_FATAL_ASSERT(size != 0); + AWS_FATAL_ASSERT(size <= buffer_pool->mem_limit); + + struct aws_s3_buffer_pool_ticket *ticket = NULL; + aws_mutex_lock(&buffer_pool->mutex); + + size_t overall_taken = buffer_pool->primary_used + buffer_pool->primary_reserved + buffer_pool->secondary_used + + buffer_pool->secondary_reserved; + + /* + * If we are allocating from secondary and there is unused space in + * primary, trim the primary in hopes we can free up enough memory. + * TODO: something smarter, like partial trim? 
+ */ + if (size > buffer_pool->primary_size_cutoff && (size + overall_taken) > buffer_pool->mem_limit && + (buffer_pool->primary_allocated > + (buffer_pool->primary_used + buffer_pool->primary_reserved + buffer_pool->block_size))) { + s_buffer_pool_trim_synced(buffer_pool); + overall_taken = buffer_pool->primary_used + buffer_pool->primary_reserved + buffer_pool->secondary_used + + buffer_pool->secondary_reserved; + } + + if ((size + overall_taken) <= buffer_pool->mem_limit) { + ticket = aws_mem_calloc(buffer_pool->base_allocator, 1, sizeof(struct aws_s3_buffer_pool_ticket)); + ticket->size = size; + if (size <= buffer_pool->primary_size_cutoff) { + buffer_pool->primary_reserved += size; + } else { + buffer_pool->secondary_reserved += size; + } + } else { + buffer_pool->has_reservation_hold = true; + } + + aws_mutex_unlock(&buffer_pool->mutex); + + if (ticket == NULL) { + AWS_LOGF_TRACE( + AWS_LS_S3_CLIENT, + "Memory limit reached while trying to allocate buffer of size %zu. " + "Putting new buffer reservations on hold...", + size); + aws_raise_error(AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT); + } + return ticket; +} + +bool aws_s3_buffer_pool_has_reservation_hold(struct aws_s3_buffer_pool *buffer_pool) { + AWS_PRECONDITION(buffer_pool); + AWS_LOGF_TRACE(AWS_LS_S3_CLIENT, "Releasing buffer reservation hold."); + return buffer_pool->has_reservation_hold; +} + +void aws_s3_buffer_pool_remove_reservation_hold(struct aws_s3_buffer_pool *buffer_pool) { + AWS_PRECONDITION(buffer_pool); + buffer_pool->has_reservation_hold = false; +} + +static uint8_t *s_primary_acquire_synced(struct aws_s3_buffer_pool *buffer_pool, size_t size, size_t *out_chunks_used) { + uint8_t *alloc_ptr = NULL; + + size_t chunks_needed = size / buffer_pool->chunk_size; + if (size % buffer_pool->chunk_size != 0) { + ++chunks_needed; /* round up */ + } + *out_chunks_used = chunks_needed; + + /* Look for space in existing blocks */ + for (size_t i = 0; i < aws_array_list_length(&buffer_pool->blocks); ++i) { + 
struct s3_buffer_pool_block *block; + aws_array_list_get_at_ptr(&buffer_pool->blocks, (void **)&block, i); + + for (size_t chunk_i = 0; chunk_i < s_chunks_per_block - chunks_needed + 1; ++chunk_i) { + if (!s_check_bits(block->alloc_bit_mask, chunk_i, chunks_needed)) { + alloc_ptr = block->block_ptr + chunk_i * buffer_pool->chunk_size; + block->alloc_bit_mask = s_set_bits(block->alloc_bit_mask, chunk_i, chunks_needed); + goto on_allocated; + } + } + } + + /* No space available. Allocate new block. */ + struct s3_buffer_pool_block block; + block.alloc_bit_mask = s_set_bits(0, 0, chunks_needed); + block.block_ptr = aws_mem_acquire(buffer_pool->base_allocator, buffer_pool->block_size); + block.block_size = buffer_pool->block_size; + aws_array_list_push_back(&buffer_pool->blocks, &block); + alloc_ptr = block.block_ptr; + + buffer_pool->primary_allocated += buffer_pool->block_size; + +on_allocated: + buffer_pool->primary_reserved -= size; + buffer_pool->primary_used += size; + + return alloc_ptr; +} + +struct aws_byte_buf aws_s3_buffer_pool_acquire_buffer( + struct aws_s3_buffer_pool *buffer_pool, + struct aws_s3_buffer_pool_ticket *ticket) { + AWS_PRECONDITION(buffer_pool); + AWS_PRECONDITION(ticket); + + if (ticket->ptr != NULL) { + return aws_byte_buf_from_empty_array(ticket->ptr, ticket->size); + } + + uint8_t *alloc_ptr = NULL; + + aws_mutex_lock(&buffer_pool->mutex); + + if (ticket->size <= buffer_pool->primary_size_cutoff) { + alloc_ptr = s_primary_acquire_synced(buffer_pool, ticket->size, &ticket->chunks_used); + } else { + alloc_ptr = aws_mem_acquire(buffer_pool->base_allocator, ticket->size); + buffer_pool->secondary_reserved -= ticket->size; + buffer_pool->secondary_used += ticket->size; + } + + aws_mutex_unlock(&buffer_pool->mutex); + ticket->ptr = alloc_ptr; + + return aws_byte_buf_from_empty_array(ticket->ptr, ticket->size); +} + +void aws_s3_buffer_pool_release_ticket( + struct aws_s3_buffer_pool *buffer_pool, + struct aws_s3_buffer_pool_ticket *ticket) { 
+ + if (buffer_pool == NULL || ticket == NULL) { + return; + } + + if (ticket->ptr == NULL) { + /* Ticket was never used, make sure to clean up reserved count. */ + aws_mutex_lock(&buffer_pool->mutex); + if (ticket->size <= buffer_pool->primary_size_cutoff) { + buffer_pool->primary_reserved -= ticket->size; + } else { + buffer_pool->secondary_reserved -= ticket->size; + } + aws_mutex_unlock(&buffer_pool->mutex); + aws_mem_release(buffer_pool->base_allocator, ticket); + return; + } + + aws_mutex_lock(&buffer_pool->mutex); + if (ticket->size <= buffer_pool->primary_size_cutoff) { + + size_t chunks_used = ticket->size / buffer_pool->chunk_size; + if (ticket->size % buffer_pool->chunk_size != 0) { + ++chunks_used; /* round up */ + } + + bool found = false; + for (size_t i = 0; i < aws_array_list_length(&buffer_pool->blocks); ++i) { + struct s3_buffer_pool_block *block; + aws_array_list_get_at_ptr(&buffer_pool->blocks, (void **)&block, i); + + if (block->block_ptr <= ticket->ptr && block->block_ptr + block->block_size > ticket->ptr) { + size_t alloc_i = (ticket->ptr - block->block_ptr) / buffer_pool->chunk_size; + + block->alloc_bit_mask = s_clear_bits(block->alloc_bit_mask, alloc_i, chunks_used); + buffer_pool->primary_used -= ticket->size; + + found = true; + break; + } + } + + AWS_FATAL_ASSERT(found); + } else { + aws_mem_release(buffer_pool->base_allocator, ticket->ptr); + buffer_pool->secondary_used -= ticket->size; + } + + aws_mem_release(buffer_pool->base_allocator, ticket); + + aws_mutex_unlock(&buffer_pool->mutex); +} + +struct aws_s3_buffer_pool_usage_stats aws_s3_buffer_pool_get_usage(struct aws_s3_buffer_pool *buffer_pool) { + aws_mutex_lock(&buffer_pool->mutex); + + struct aws_s3_buffer_pool_usage_stats ret = (struct aws_s3_buffer_pool_usage_stats){ + .mem_limit = buffer_pool->mem_limit, + .primary_cutoff = buffer_pool->primary_size_cutoff, + .primary_allocated = buffer_pool->primary_allocated, + .primary_used = buffer_pool->primary_used, + 
.primary_reserved = buffer_pool->primary_reserved, + .primary_num_blocks = aws_array_list_length(&buffer_pool->blocks), + .secondary_used = buffer_pool->secondary_used, + .secondary_reserved = buffer_pool->secondary_reserved, + }; + + aws_mutex_unlock(&buffer_pool->mutex); + return ret; +} diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c b/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c index c16288535ec..0bfdc018122 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c @@ -2,7 +2,6 @@ #include "aws/s3/private/s3_util.h" #include <aws/cal/hash.h> #include <aws/checksums/crc.h> -#include <aws/io/stream.h> #define AWS_CRC32_LEN 4 #define AWS_CRC32C_LEN 4 @@ -265,7 +264,7 @@ int aws_checksum_compute( case AWS_SCA_CRC32C: return aws_checksum_compute_fn(allocator, input, output, aws_crc32c_checksum_new, truncate_to); default: - return AWS_OP_ERR; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c b/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c index 14dea366431..40b9e80bfbd 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c @@ -70,12 +70,12 @@ static int s_set_post_chunk_stream(struct aws_chunk_stream *parent_stream) { struct aws_byte_cursor checksum_result_cursor = aws_byte_cursor_from_buf(&parent_stream->checksum_result); if (parent_stream->checksum_result_output && aws_byte_buf_init_copy_from_cursor( - parent_stream->checksum_result_output, aws_default_allocator(), checksum_result_cursor)) { + parent_stream->checksum_result_output, parent_stream->allocator, checksum_result_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_init( &parent_stream->post_chunk_buffer, - aws_default_allocator(), + parent_stream->allocator, final_chunk_cursor.len + parent_stream->checksum_header_name->len + colon_cursor.len + 
checksum_result_cursor.len + post_trailer_cursor.len)) { goto error; @@ -88,7 +88,7 @@ static int s_set_post_chunk_stream(struct aws_chunk_stream *parent_stream) { goto error; } struct aws_byte_cursor post_chunk_cursor = aws_byte_cursor_from_buf(&parent_stream->post_chunk_buffer); - parent_stream->current_stream = aws_input_stream_new_from_cursor(aws_default_allocator(), &post_chunk_cursor); + parent_stream->current_stream = aws_input_stream_new_from_cursor(parent_stream->allocator, &post_chunk_cursor); parent_stream->set_current_stream_fn = s_set_null_stream; return AWS_OP_SUCCESS; error: diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_client.c b/contrib/restricted/aws/aws-c-s3/source/s3_client.c index 4dd80bd4f0f..678abf9a857 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_client.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_client.c @@ -5,18 +5,22 @@ #include "aws/s3/private/s3_auto_ranged_get.h" #include "aws/s3/private/s3_auto_ranged_put.h" +#include "aws/s3/private/s3_buffer_pool.h" #include "aws/s3/private/s3_client_impl.h" +#include "aws/s3/private/s3_copy_object.h" #include "aws/s3/private/s3_default_meta_request.h" #include "aws/s3/private/s3_meta_request_impl.h" +#include "aws/s3/private/s3_parallel_input_stream.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" +#include "aws/s3/private/s3express_credentials_provider_impl.h" +#include "aws/s3/s3express_credentials_provider.h" #include <aws/auth/credentials.h> #include <aws/common/assert.h> #include <aws/common/atomics.h> #include <aws/common/clock.h> #include <aws/common/device_random.h> -#include <aws/common/environment.h> #include <aws/common/json.h> #include <aws/common/string.h> #include <aws/common/system_info.h> @@ -33,7 +37,6 @@ #include <aws/io/tls_channel_handler.h> #include <aws/io/uri.h> -#include <aws/s3/private/s3_copy_object.h> #include <inttypes.h> #include <math.h> @@ -48,31 +51,31 @@ struct aws_s3_meta_request_work { static const 
enum aws_log_level s_log_level_client_stats = AWS_LL_INFO; +/* max-requests-in-flight = ideal-num-connections * s_max_requests_multiplier */ static const uint32_t s_max_requests_multiplier = 4; -/* TODO Provide analysis on origins of this value. */ -static const double s_throughput_per_vip_gbps = 4.0; - -/* Preferred amount of active connections per meta request type. */ -const uint32_t g_num_conns_per_vip_meta_request_look_up[AWS_S3_META_REQUEST_TYPE_MAX] = { - 10, /* AWS_S3_META_REQUEST_TYPE_DEFAULT */ - 10, /* AWS_S3_META_REQUEST_TYPE_GET_OBJECT */ - 10, /* AWS_S3_META_REQUEST_TYPE_PUT_OBJECT */ - 10 /* AWS_S3_META_REQUEST_TYPE_COPY_OBJECT */ -}; +/* This is used to determine the ideal number of HTTP connections. Algorithm is roughly: + * num-connections-max = throughput-target-gbps / s_throughput_per_connection_gbps + * + * Magic value based on: match results of the previous algorithm, + * where throughput-target-gpbs of 100 resulted in 250 connections. + * + * TODO: Improve this algorithm (expect higher throughput for S3 Express, + * expect lower throughput for small objects, etc) + */ +static const double s_throughput_per_connection_gbps = 100.0 / 250; -/* Should be max of s_num_conns_per_vip_meta_request_look_up */ -const uint32_t g_max_num_connections_per_vip = 10; +/* After throughput math, clamp the min/max number of connections */ +const uint32_t g_min_num_connections = 10; /* Magic value based on: 10 was old behavior */ /** * Default part size is 8 MiB to reach the best performance from the experiments we had. - * Default max part size is SIZE_MAX on 32bit systems, which is around 4GiB; and 5GiB on a 64bit system. - * The server limit is 5GiB, but object size limit is 5TiB for now. We should be good enough for all the cases. - * The max number of upload parts is 10000, which limits the object size to 39TiB on 32bit and 49TiB on 64bit. + * Default max part size is 5GiB as the server limit. Object size limit is 5TiB for now. 
+ * max number of upload parts is 10000. * TODO Provide more information on other values. */ static const size_t s_default_part_size = 8 * 1024 * 1024; -static const uint64_t s_default_max_part_size = SIZE_MAX < 5368709120ULL ? SIZE_MAX : 5368709120ULL; +static const uint64_t s_default_max_part_size = 5368709120ULL; static const double s_default_throughput_target_gbps = 10.0; static const uint32_t s_default_max_retries = 5; static size_t s_dns_host_address_ttl_seconds = 5 * 60; @@ -81,6 +84,13 @@ static size_t s_dns_host_address_ttl_seconds = 5 * 60; * 30 seconds mirrors the value currently used by the Java SDK. */ static const uint32_t s_default_throughput_failure_interval_seconds = 30; +/* Amount of time spent idling before trimming buffer. */ +static const size_t s_buffer_pool_trim_time_offset_in_s = 5; + +/* Interval for scheduling endpoints cleanup task. This is to trim endpoints with a zero reference + * count. S3 closes the idle connections in ~5 seconds. */ +static const uint32_t s_endpoints_cleanup_time_offset_in_s = 5; + /* Called when ref count is 0. */ static void s_s3_client_start_destroy(void *user_data); @@ -92,6 +102,8 @@ static void s_s3_client_body_streaming_elg_shutdown(void *user_data); static void s_s3_client_create_connection_for_request(struct aws_s3_client *client, struct aws_s3_request *request); +static void s_s3_endpoints_cleanup_task(struct aws_task *task, void *arg, enum aws_task_status task_status); + /* Callback which handles the HTTP connection retrieved by acquire_http_connection. 
*/ static void s_s3_client_on_acquire_http_connection( struct aws_http_connection *http_connection, @@ -128,6 +140,7 @@ static struct aws_s3_client_vtable s_s3_client_default_vtable = { .process_work = s_s3_client_process_work_default, .endpoint_shutdown_callback = s_s3_client_endpoint_shutdown_callback, .finish_destroy = s_s3_client_finish_destroy_default, + .parallel_input_stream_new_from_file = aws_parallel_input_stream_new_from_file, }; void aws_s3_set_dns_ttl(size_t ttl) { @@ -145,32 +158,9 @@ uint32_t aws_s3_client_get_max_active_connections( struct aws_s3_client *client, struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(client); + (void)meta_request; - uint32_t num_connections_per_vip = g_max_num_connections_per_vip; - uint32_t num_vips = client->ideal_vip_count; - - if (meta_request != NULL) { - num_connections_per_vip = g_num_conns_per_vip_meta_request_look_up[meta_request->type]; - - struct aws_s3_endpoint *endpoint = meta_request->endpoint; - AWS_ASSERT(endpoint != NULL); - - AWS_ASSERT(client->vtable->get_host_address_count); - size_t num_known_vips = client->vtable->get_host_address_count( - client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A); - - /* If the number of known vips is less than our ideal VIP count, clamp it. */ - if (num_known_vips < (size_t)num_vips) { - num_vips = (uint32_t)num_known_vips; - } - } - - /* We always want to allow for at least one VIP worth of connections. 
*/ - if (num_vips == 0) { - num_vips = 1; - } - - uint32_t max_active_connections = num_vips * num_connections_per_vip; + uint32_t max_active_connections = client->ideal_connection_count; if (client->max_active_connections_override > 0 && client->max_active_connections_override < max_active_connections) { @@ -219,6 +209,39 @@ void aws_s3_client_unlock_synced_data(struct aws_s3_client *client) { aws_mutex_unlock(&client->synced_data.lock); } +static void s_s3express_provider_finish_destroy(void *user_data) { + struct aws_s3_client *client = user_data; + AWS_PRECONDITION(client); + /* BEGIN CRITICAL SECTION */ + { + aws_s3_client_lock_synced_data(client); + client->synced_data.s3express_provider_active = false; + /* Schedule the work task to call s_s3_client_finish_destroy function if + * everything cleaning up asynchronously has finished. */ + s_s3_client_schedule_process_work_synced(client); + aws_s3_client_unlock_synced_data(client); + } + /* END CRITICAL SECTION */ +} + +struct aws_s3express_credentials_provider *s_s3express_provider_default_factory( + struct aws_allocator *allocator, + struct aws_s3_client *client, + aws_simple_completion_callback on_provider_shutdown_callback, + void *shutdown_user_data, + void *factory_user_data) { + (void)factory_user_data; + + struct aws_s3express_credentials_provider_default_options options = { + .client = client, + .shutdown_complete_callback = on_provider_shutdown_callback, + .shutdown_user_data = shutdown_user_data, + }; + struct aws_s3express_credentials_provider *s3express_provider = + aws_s3express_credentials_provider_new_default(allocator, &options); + return s3express_provider; +} + struct aws_s3_client *aws_s3_client_new( struct aws_allocator *allocator, const struct aws_s3_client_config *client_config) { @@ -243,6 +266,33 @@ struct aws_s3_client *aws_s3_client_new( return NULL; } + if (client_config->signing_config == NULL) { + AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Cannot create client from client_config; 
signing_config is required."); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + + if (client_config->signing_config->credentials == NULL && + client_config->signing_config->credentials_provider == NULL) { + AWS_LOGF_ERROR( + AWS_LS_S3_CLIENT, + "Cannot create client from client_config; Invalid signing_config provided, either credentials or " + "credentials provider has to be set."); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + + if (!client_config->enable_s3express && + client_config->signing_config->algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS) { + AWS_LOGF_ERROR( + AWS_LS_S3_CLIENT, + "Cannot create client from client_config; Client config is set use S3 Express signing, but S3 Express " + "support is " + "not configured."); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + #ifdef BYO_CRYPTO if (client_config->tls_mode == AWS_MR_TLS_ENABLED && client_config->tls_connection_options == NULL) { AWS_LOGF_ERROR( @@ -257,12 +307,65 @@ struct aws_s3_client *aws_s3_client_new( struct aws_s3_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_client)); client->allocator = allocator; + + size_t mem_limit = 0; + if (client_config->memory_limit_in_bytes == 0) { +#if SIZE_BITS == 32 + if (client_config->throughput_target_gbps > 25.0) { + mem_limit = GB_TO_BYTES(2); + } else { + mem_limit = GB_TO_BYTES(1); + } +#else + if (client_config->throughput_target_gbps > 75.0) { + mem_limit = GB_TO_BYTES(8); + } else if (client_config->throughput_target_gbps > 25.0) { + mem_limit = GB_TO_BYTES(4); + } else { + mem_limit = GB_TO_BYTES(2); + } +#endif + } else { + // cap memory limit to SIZE_MAX + if (client_config->memory_limit_in_bytes > SIZE_MAX) { + mem_limit = SIZE_MAX; + } else { + mem_limit = (size_t)client_config->memory_limit_in_bytes; + } + } + + size_t part_size = s_default_part_size; + if (client_config->part_size != 0) { + if (client_config->part_size > SIZE_MAX) { + part_size = SIZE_MAX; + } else { 
+ part_size = (size_t)client_config->part_size; + } + } + + client->buffer_pool = aws_s3_buffer_pool_new(allocator, part_size, mem_limit); + + if (client->buffer_pool == NULL) { + goto on_early_fail; + } + + struct aws_s3_buffer_pool_usage_stats pool_usage = aws_s3_buffer_pool_get_usage(client->buffer_pool); + + if (client_config->max_part_size > pool_usage.mem_limit) { + AWS_LOGF_ERROR( + AWS_LS_S3_CLIENT, + "Cannot create client from client_config; configured max part size should not exceed memory limit." + "size."); + aws_raise_error(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG); + goto on_early_fail; + } + client->vtable = &s_s3_client_default_vtable; aws_ref_count_init(&client->ref_count, client, (aws_simple_completion_callback *)s_s3_client_start_destroy); if (aws_mutex_init(&client->synced_data.lock) != AWS_OP_SUCCESS) { - goto lock_init_fail; + goto on_early_fail; } aws_linked_list_init(&client->synced_data.pending_meta_request_work); @@ -278,7 +381,7 @@ struct aws_s3_client *aws_s3_client_new( } aws_atomic_init_int(&client->stats.num_requests_stream_queued_waiting, 0); - aws_atomic_init_int(&client->stats.num_requests_streaming, 0); + aws_atomic_init_int(&client->stats.num_requests_streaming_response, 0); *((uint32_t *)&client->max_active_connections_override) = client_config->max_active_connections_override; @@ -293,20 +396,32 @@ struct aws_s3_client *aws_s3_client_new( /* Make a copy of the region string. 
*/ client->region = aws_string_new_from_array(allocator, client_config->region.ptr, client_config->region.len); - if (client_config->part_size != 0) { - *((size_t *)&client->part_size) = client_config->part_size; + *((size_t *)&client->part_size) = part_size; + + if (client_config->max_part_size != 0) { + *((uint64_t *)&client->max_part_size) = client_config->max_part_size; } else { - *((size_t *)&client->part_size) = s_default_part_size; + *((uint64_t *)&client->max_part_size) = s_default_max_part_size; } - if (client_config->max_part_size != 0) { - *((size_t *)&client->max_part_size) = client_config->max_part_size; + if (client_config->max_part_size > pool_usage.mem_limit) { + *((uint64_t *)&client->max_part_size) = pool_usage.mem_limit; + } + + if (client->max_part_size > SIZE_MAX) { + /* For the 32bit max part size to be SIZE_MAX */ + *((uint64_t *)&client->max_part_size) = SIZE_MAX; + } + + if (client_config->multipart_upload_threshold != 0) { + *((uint64_t *)&client->multipart_upload_threshold) = client_config->multipart_upload_threshold; } else { - *((size_t *)&client->max_part_size) = (size_t)s_default_max_part_size; + *((uint64_t *)&client->multipart_upload_threshold) = + part_size > g_s3_min_upload_part_size ? 
part_size : g_s3_min_upload_part_size; } if (client_config->max_part_size < client_config->part_size) { - *((size_t *)&client_config->max_part_size) = client_config->part_size; + *((uint64_t *)&client_config->max_part_size) = client_config->part_size; } client->connect_timeout_ms = client_config->connect_timeout_ms; @@ -388,13 +503,9 @@ struct aws_s3_client *aws_s3_client_new( .shutdown_callback_user_data = client, }; - if (aws_get_cpu_group_count() > 1) { - client->body_streaming_elg = aws_event_loop_group_new_default_pinned_to_cpu_group( - client->allocator, num_streaming_threads, 1, &body_streaming_elg_shutdown_options); - } else { - client->body_streaming_elg = aws_event_loop_group_new_default( - client->allocator, num_streaming_threads, &body_streaming_elg_shutdown_options); - } + client->body_streaming_elg = aws_event_loop_group_new_default( + client->allocator, num_streaming_threads, &body_streaming_elg_shutdown_options); + if (!client->body_streaming_elg) { /* Fail to create elg, we should fail the call */ goto on_error; @@ -403,7 +514,7 @@ struct aws_s3_client *aws_s3_client_new( } /* Setup cannot fail after this point. */ - if (client_config->throughput_target_gbps != 0.0) { + if (client_config->throughput_target_gbps > 0.0) { *((double *)&client->throughput_target_gbps) = client_config->throughput_target_gbps; } else { *((double *)&client->throughput_target_gbps) = s_default_throughput_target_gbps; @@ -412,14 +523,24 @@ struct aws_s3_client *aws_s3_client_new( *((enum aws_s3_meta_request_compute_content_md5 *)&client->compute_content_md5) = client_config->compute_content_md5; - /* Determine how many vips are ideal by dividing target-throughput by throughput-per-vip. */ + /* Determine how many connections are ideal by dividing target-throughput by throughput-per-connection. 
*/ { - double ideal_vip_count_double = client->throughput_target_gbps / s_throughput_per_vip_gbps; - *((uint32_t *)&client->ideal_vip_count) = (uint32_t)ceil(ideal_vip_count_double); - } - - if (client_config->signing_config) { - client->cached_signing_config = aws_cached_signing_config_new(client->allocator, client_config->signing_config); + double ideal_connection_count_double = client->throughput_target_gbps / s_throughput_per_connection_gbps; + /* round up and clamp */ + ideal_connection_count_double = ceil(ideal_connection_count_double); + ideal_connection_count_double = aws_max_double(g_min_num_connections, ideal_connection_count_double); + ideal_connection_count_double = aws_min_double(UINT32_MAX, ideal_connection_count_double); + *(uint32_t *)&client->ideal_connection_count = (uint32_t)ideal_connection_count_double; + } + + client->cached_signing_config = aws_cached_signing_config_new(client, client_config->signing_config); + if (client_config->enable_s3express) { + if (client_config->s3express_provider_override_factory) { + client->s3express_provider_factory = client_config->s3express_provider_override_factory; + client->factory_user_data = client_config->factory_user_data; + } else { + client->s3express_provider_factory = s_s3express_provider_default_factory; + } } client->synced_data.active = true; @@ -448,6 +569,8 @@ struct aws_s3_client *aws_s3_client_new( aws_hash_callback_string_eq, aws_hash_callback_string_destroy, NULL); + aws_task_init( + &client->synced_data.endpoints_cleanup_task, s_s3_endpoints_cleanup_task, client, "s3_endpoints_cleanup_task"); /* Initialize shutdown options and tracking. 
*/ client->shutdown_callback = client_config->shutdown_callback; @@ -480,7 +603,7 @@ on_error: aws_event_loop_group_release(client->client_bootstrap->event_loop_group); aws_client_bootstrap_release(client->client_bootstrap); aws_mutex_clean_up(&client->synced_data.lock); -lock_init_fail: +on_early_fail: aws_mem_release(client->allocator, client); return NULL; } @@ -514,7 +637,11 @@ static void s_s3_client_start_destroy(void *user_data) { aws_s3_client_lock_synced_data(client); client->synced_data.active = false; - + if (!client->synced_data.endpoints_cleanup_task_scheduled) { + client->synced_data.endpoints_cleanup_task_scheduled = true; + aws_event_loop_schedule_task_now( + client->process_work_event_loop, &client->synced_data.endpoints_cleanup_task); + } /* Prevent the client from cleaning up in between the mutex unlock/re-lock below.*/ client->synced_data.start_destroy_executing = true; @@ -524,6 +651,7 @@ static void s_s3_client_start_destroy(void *user_data) { aws_event_loop_group_release(client->body_streaming_elg); client->body_streaming_elg = NULL; + aws_s3express_credentials_provider_release(client->s3express_provider); /* BEGIN CRITICAL SECTION */ { @@ -543,6 +671,10 @@ static void s_s3_client_finish_destroy_default(struct aws_s3_client *client) { AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Client finishing destruction.", (void *)client); + if (client->threaded_data.trim_buffer_pool_task_scheduled) { + aws_event_loop_cancel_task(client->process_work_event_loop, &client->synced_data.trim_buffer_pool_task); + } + aws_string_destroy(client->region); client->region = NULL; @@ -580,6 +712,7 @@ static void s_s3_client_finish_destroy_default(struct aws_s3_client *client) { aws_s3_client_shutdown_complete_callback_fn *shutdown_callback = client->shutdown_callback; void *shutdown_user_data = client->shutdown_callback_user_data; + aws_s3_buffer_pool_destroy(client->buffer_pool); aws_mem_release(client->allocator, client); client = NULL; @@ -610,6 +743,9 @@ uint32_t 
aws_s3_client_queue_requests_threaded( bool queue_front) { AWS_PRECONDITION(client); AWS_PRECONDITION(request_list); + if (aws_linked_list_empty(request_list)) { + return 0; + } uint32_t request_list_size = 0; @@ -704,6 +840,13 @@ struct aws_s3_meta_request *aws_s3_client_make_meta_request( AWS_PRECONDITION(client->vtable->meta_request_factory); AWS_PRECONDITION(options); + bool use_s3express_signing = false; + if (options->signing_config != NULL) { + use_s3express_signing = options->signing_config->algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS; + } else if (client->cached_signing_config) { + use_s3express_signing = client->cached_signing_config->config.algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS; + } + if (options->type >= AWS_S3_META_REQUEST_TYPE_MAX) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, @@ -722,6 +865,15 @@ struct aws_s3_meta_request *aws_s3_client_make_meta_request( return NULL; } + if (use_s3express_signing && client->s3express_provider_factory == NULL) { + AWS_LOGF_ERROR( + AWS_LS_S3_CLIENT, + "id=%p Cannot create meta s3 request; client doesn't support S3 Express signing.", + (void *)client); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + struct aws_http_headers *message_headers = aws_http_message_get_headers(options->message); if (message_headers == NULL) { @@ -762,7 +914,7 @@ struct aws_s3_meta_request *aws_s3_client_make_meta_request( options->checksum_config->checksum_algorithm == AWS_SCA_NONE) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, - "id=%p Cannot create meta s3 request; checksum algorithm must be set to calculate checksum.", + "id=%p Cannot create meta s3 request; checksum location is set, but no checksum algorithm selected.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; @@ -771,8 +923,7 @@ struct aws_s3_meta_request *aws_s3_client_make_meta_request( options->checksum_config->location == AWS_SCL_NONE) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, - "id=%p Cannot create meta s3 request; checksum algorithm 
cannot be set if not calculate checksum from " - "client.", + "id=%p Cannot create meta s3 request; checksum algorithm is set, but no checksum location selected.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; @@ -788,7 +939,7 @@ struct aws_s3_meta_request *aws_s3_client_make_meta_request( AWS_FATAL_ASSERT(aws_http_headers_get(message_headers, g_host_header_name, &host_header_value) == AWS_OP_SUCCESS); bool is_https = true; - uint16_t port = 0; + uint32_t port = 0; if (options->endpoint != NULL) { struct aws_byte_cursor https_scheme = aws_byte_cursor_from_c_str("https"); @@ -824,6 +975,34 @@ struct aws_s3_meta_request *aws_s3_client_make_meta_request( { aws_s3_client_lock_synced_data(client); + if (use_s3express_signing && !client->synced_data.s3express_provider_active) { + + AWS_LOGF_TRACE(AWS_LS_S3_CLIENT, "id=%p Create S3 Express provider for the client.", (void *)client); + /** + * Invoke the factory within the lock. We WARNED people uses their own factory to not use ANY client related + * api during the factory. + * + * We cannot just release the lock and invoke the factory, because it can lead to the other request assume + * the provider is active, and not waiting for the provider to be created. And lead to unexpected behavior. + */ + client->s3express_provider = client->s3express_provider_factory( + client->allocator, client, s_s3express_provider_finish_destroy, client, client->factory_user_data); + + /* Provider is related to client, we don't need to clean it up if meta request failed. But, if provider + * failed to be created, let's bail out earlier. 
*/ + if (!client->s3express_provider) { + AWS_LOGF_ERROR( + AWS_LS_S3_CLIENT, + "id=%p Failed to create S3 Express provider for client due to error %d (%s)", + (void *)client, + aws_last_error_or_unknown(), + aws_error_str(aws_last_error_or_unknown())); + error_occurred = true; + goto unlock; + } + client->synced_data.s3express_provider_active = true; + } + struct aws_string *endpoint_host_name = NULL; if (options->endpoint != NULL) { @@ -842,6 +1021,10 @@ struct aws_s3_meta_request *aws_s3_client_make_meta_request( struct aws_s3_endpoint *endpoint = NULL; struct aws_hash_element *endpoint_hash_element = NULL; + if (use_s3express_signing) { + meta_request->s3express_session_host = aws_string_new_from_string(client->allocator, endpoint_host_name); + } + int was_created = 0; if (aws_hash_table_create( &client->synced_data.endpoints, endpoint_host_name, &endpoint_hash_element, &was_created)) { @@ -930,12 +1113,12 @@ static struct aws_s3_meta_request *s_s3_client_meta_request_factory_default( AWS_PRECONDITION(client); AWS_PRECONDITION(options); - struct aws_http_headers *initial_message_headers = aws_http_message_get_headers(options->message); + const struct aws_http_headers *initial_message_headers = aws_http_message_get_headers(options->message); AWS_ASSERT(initial_message_headers); uint64_t content_length = 0; struct aws_byte_cursor content_length_cursor; - bool content_length_header_found = false; + bool content_length_found = false; if (!aws_http_headers_get(initial_message_headers, g_content_length_header_name, &content_length_cursor)) { if (aws_byte_cursor_utf8_parse_u64(content_length_cursor, &content_length)) { @@ -946,59 +1129,116 @@ static struct aws_s3_meta_request *s_s3_client_meta_request_factory_default( aws_raise_error(AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER); return NULL; } - content_length_header_found = true; + content_length_found = true; + } + + /* There are multiple ways to pass the body in, ensure only 1 was used */ + int body_source_count = 
0; + if (aws_http_message_get_body_stream(options->message) != NULL) { + ++body_source_count; + } + if (options->send_filepath.len > 0) { + ++body_source_count; + } + if (options->send_using_async_writes == true) { + if (options->type != AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) { + /* TODO: we could support async-writes for DEFAULT type too, just takes work & testing */ + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "Could not create meta request." + "send-using-data-writes can only be used with auto-ranged-put."); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + if (content_length_found) { + /* TODO: we could support async-writes with content-length, just takes work & testing */ + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "Could not create meta request." + "send-using-data-writes can only be used when Content-Length is unknown."); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + ++body_source_count; + } + if (options->send_async_stream != NULL) { + ++body_source_count; + } + if (body_source_count > 1) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "Could not create meta request." + " More than one data source is set (filepath, async stream, body stream, data writes)."); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + size_t part_size = client->part_size; + if (options->part_size != 0) { + if (options->part_size > SIZE_MAX) { + part_size = SIZE_MAX; + } else { + part_size = (size_t)options->part_size; + } } /* Call the appropriate meta-request new function. */ switch (options->type) { case AWS_S3_META_REQUEST_TYPE_GET_OBJECT: { - /* If the initial request already has partNumber, the request is not - * splittable(?). Treat it as a Default request. 
- * TODO: Still need tests to verify that the request of a part is - * splittable or not */ - if (aws_http_headers_has(initial_message_headers, aws_byte_cursor_from_c_str("partNumber"))) { - return aws_s3_meta_request_default_new(client->allocator, client, content_length, false, options); + struct aws_byte_cursor path_and_query; + + if (aws_http_message_get_request_path(options->message, &path_and_query) == AWS_OP_SUCCESS) { + /* If the initial request already has partNumber, the request is not + * splittable(?). Treat it as a Default request. + * TODO: Still need tests to verify that the request of a part is + * splittable or not */ + struct aws_byte_cursor sub_string; + AWS_ZERO_STRUCT(sub_string); + /* The first split on '?' for path and query is path, the second is query */ + if (aws_byte_cursor_next_split(&path_and_query, '?', &sub_string) == true) { + aws_byte_cursor_next_split(&path_and_query, '?', &sub_string); + struct aws_uri_param param; + AWS_ZERO_STRUCT(param); + struct aws_byte_cursor part_number_query_str = aws_byte_cursor_from_c_str("partNumber"); + while (aws_query_string_next_param(sub_string, ¶m)) { + if (aws_byte_cursor_eq(¶m.key, &part_number_query_str)) { + return aws_s3_meta_request_default_new( + client->allocator, + client, + AWS_S3_REQUEST_TYPE_GET_OBJECT, + content_length, + false /*should_compute_content_md5*/, + options); + } + } + } } - - return aws_s3_meta_request_auto_ranged_get_new(client->allocator, client, client->part_size, options); + return aws_s3_meta_request_auto_ranged_get_new(client->allocator, client, part_size, options); } case AWS_S3_META_REQUEST_TYPE_PUT_OBJECT: { - - if (!content_length_header_found) { - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, - "Could not create auto-ranged-put meta request; there is no Content-Length header present."); - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return NULL; - } - - struct aws_input_stream *input_stream = aws_http_message_get_body_stream(options->message); - - if 
((input_stream == NULL) && (options->send_filepath.len == 0)) { + if (body_source_count == 0) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, - "Could not create auto-ranged-put meta request; filepath or body stream must be set."); + "Could not create auto-ranged-put meta request." + " Body must be set via filepath, async stream, or body stream."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (options->resume_token == NULL) { + uint64_t client_max_part_size = client->max_part_size; - size_t client_part_size = client->part_size; - size_t client_max_part_size = client->max_part_size; - - if (client_part_size < g_s3_min_upload_part_size) { + if (part_size < g_s3_min_upload_part_size) { AWS_LOGF_WARN( AWS_LS_S3_META_REQUEST, - "Client config part size of %" PRIu64 " is less than the minimum upload part size of %" PRIu64 + "Config part size of %" PRIu64 " is less than the minimum upload part size of %" PRIu64 ". Using to the minimum part-size for upload.", - (uint64_t)client_part_size, + (uint64_t)part_size, (uint64_t)g_s3_min_upload_part_size); - client_part_size = g_s3_min_upload_part_size; + part_size = g_s3_min_upload_part_size; } - if (client_max_part_size < g_s3_min_upload_part_size) { + if (client_max_part_size < (uint64_t)g_s3_min_upload_part_size) { AWS_LOGF_WARN( AWS_LS_S3_META_REQUEST, "Client config max part size of %" PRIu64 @@ -1007,82 +1247,84 @@ static struct aws_s3_meta_request *s_s3_client_meta_request_factory_default( (uint64_t)client_max_part_size, (uint64_t)g_s3_min_upload_part_size); - client_max_part_size = g_s3_min_upload_part_size; + client_max_part_size = (uint64_t)g_s3_min_upload_part_size; + } + + uint32_t num_parts = 0; + if (content_length_found) { + size_t out_part_size = 0; + if (aws_s3_calculate_optimal_mpu_part_size_and_num_parts( + content_length, part_size, client_max_part_size, &out_part_size, &num_parts)) { + return NULL; + } + part_size = out_part_size; } - if (content_length <= client_part_size) { + if (part_size != 
options->part_size && part_size != client->part_size) { + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "The multipart upload part size has been adjusted to %" PRIu64 "", + (uint64_t)part_size); + } + + /* Default to client level setting */ + uint64_t multipart_upload_threshold = client->multipart_upload_threshold; + if (options->multipart_upload_threshold != 0) { + /* If the threshold is set for the meta request, use it */ + multipart_upload_threshold = options->multipart_upload_threshold; + } else if (options->part_size != 0) { + /* If the threshold is not set, but the part size is set for the meta request, use it */ + multipart_upload_threshold = part_size; + } + + if (content_length_found && content_length <= multipart_upload_threshold) { return aws_s3_meta_request_default_new( client->allocator, client, + AWS_S3_REQUEST_TYPE_PUT_OBJECT, content_length, client->compute_content_md5 == AWS_MR_CONTENT_MD5_ENABLED && !aws_http_headers_has(initial_message_headers, g_content_md5_header_name), options); } else { if (aws_s3_message_util_check_checksum_header(options->message)) { - /* The checksum header has been set and the request will be splitted. We fail the request */ + /* The checksum header has been set and the request will be split. We fail the request */ AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create auto-ranged-put meta request; checksum headers has been set for " "auto-ranged-put that will be split. 
Pre-calculated checksums are only supported for " - "single " - "part upload."); + "single part upload."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } } - uint64_t part_size_uint64 = content_length / (uint64_t)g_s3_max_num_upload_parts; - - if (part_size_uint64 > SIZE_MAX) { - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, - "Could not create auto-ranged-put meta request; required part size of %" PRIu64 - " bytes is too large for platform.", - part_size_uint64); - - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return NULL; - } - - size_t part_size = (size_t)part_size_uint64; - - if (part_size > client_max_part_size) { + return aws_s3_meta_request_auto_ranged_put_new( + client->allocator, client, part_size, content_length_found, content_length, num_parts, options); + } else { /* else using resume token */ + if (!content_length_found) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, - "Could not create auto-ranged-put meta request; required part size for put request is %" PRIu64 - ", but current maximum part size is %" PRIu64, - (uint64_t)part_size, - (uint64_t)client_max_part_size); + "Could not create auto-ranged-put resume meta request; content_length must be specified."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } - if (part_size < client_part_size) { - part_size = client_part_size; - } - - uint32_t num_parts = (uint32_t)(content_length / part_size); - - if ((content_length % part_size) > 0) { - ++num_parts; - } - + /* don't pass part size and total num parts. constructor will pick it up from token */ return aws_s3_meta_request_auto_ranged_put_new( - client->allocator, client, part_size, content_length, num_parts, options); - } else { - /* dont pass part size and total num parts. 
constructor will pick it up from token */ - return aws_s3_meta_request_auto_ranged_put_new( - client->allocator, client, 0, content_length, 0, options); + client->allocator, client, 0, true, content_length, 0, options); } } case AWS_S3_META_REQUEST_TYPE_COPY_OBJECT: { - /* TODO: support copy object correctly. */ - AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "CopyObject is not currently supported"); - aws_raise_error(AWS_ERROR_UNIMPLEMENTED); - return NULL; + return aws_s3_meta_request_copy_object_new(client->allocator, client, options); } case AWS_S3_META_REQUEST_TYPE_DEFAULT: - return aws_s3_meta_request_default_new(client->allocator, client, content_length, false, options); + return aws_s3_meta_request_default_new( + client->allocator, + client, + AWS_S3_REQUEST_TYPE_UNKNOWN, + content_length, + false /*should_compute_content_md5*/, + options); default: AWS_FATAL_ASSERT(false); } @@ -1100,8 +1342,7 @@ static void s_s3_client_push_meta_request_synced( struct aws_s3_meta_request_work *meta_request_work = aws_mem_calloc(client->allocator, 1, sizeof(struct aws_s3_meta_request_work)); - aws_s3_meta_request_acquire(meta_request); - meta_request_work->meta_request = meta_request; + meta_request_work->meta_request = aws_s3_meta_request_acquire(meta_request); aws_linked_list_push_back(&client->synced_data.pending_meta_request_work, &meta_request_work->node); } @@ -1130,6 +1371,110 @@ static void s_s3_client_schedule_process_work_synced_default(struct aws_s3_clien client->synced_data.process_work_task_scheduled = true; } +/* Task function for trying to find a request that can be processed. 
*/ +static void s_s3_client_trim_buffer_pool_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { + AWS_PRECONDITION(task); + (void)task; + (void)task_status; + + if (task_status != AWS_TASK_STATUS_RUN_READY) { + return; + } + + struct aws_s3_client *client = arg; + AWS_PRECONDITION(client); + + client->threaded_data.trim_buffer_pool_task_scheduled = false; + + uint32_t num_reqs_in_flight = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_in_flight); + + if (num_reqs_in_flight == 0) { + aws_s3_buffer_pool_trim(client->buffer_pool); + } +} + +static void s_s3_client_schedule_buffer_pool_trim_synced(struct aws_s3_client *client) { + ASSERT_SYNCED_DATA_LOCK_HELD(client); + + if (client->threaded_data.trim_buffer_pool_task_scheduled) { + return; + } + + uint32_t num_reqs_in_flight = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_in_flight); + if (num_reqs_in_flight > 0) { + return; + } + + aws_task_init( + &client->synced_data.trim_buffer_pool_task, + s_s3_client_trim_buffer_pool_task, + client, + "s3_client_buffer_pool_trim_task"); + + uint64_t trim_time = 0; + aws_event_loop_current_clock_time(client->process_work_event_loop, &trim_time); + trim_time += + aws_timestamp_convert(s_buffer_pool_trim_time_offset_in_s, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); + + aws_event_loop_schedule_task_future( + client->process_work_event_loop, &client->synced_data.trim_buffer_pool_task, trim_time); + + client->threaded_data.trim_buffer_pool_task_scheduled = true; +} + +static void s_s3_endpoints_cleanup_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { + (void)task; + (void)task_status; + + struct aws_s3_client *client = arg; + struct aws_array_list endpoints_to_release; + aws_array_list_init_dynamic(&endpoints_to_release, client->allocator, 5, sizeof(struct aws_s3_endpoint *)); + + /* BEGIN CRITICAL SECTION */ + aws_s3_client_lock_synced_data(client); + client->synced_data.endpoints_cleanup_task_scheduled = 
false; + + for (struct aws_hash_iter iter = aws_hash_iter_begin(&client->synced_data.endpoints); !aws_hash_iter_done(&iter); + aws_hash_iter_next(&iter)) { + struct aws_s3_endpoint *endpoint = (struct aws_s3_endpoint *)iter.element.value; + if (endpoint->client_synced_data.ref_count == 0) { + aws_array_list_push_back(&endpoints_to_release, &endpoint); + aws_hash_iter_delete(&iter, true); + } + } + + /* END CRITICAL SECTION */ + aws_s3_client_unlock_synced_data(client); + + /* now destroy all endpoints without holding the lock */ + size_t list_size = aws_array_list_length(&endpoints_to_release); + for (size_t i = 0; i < list_size; ++i) { + struct aws_s3_endpoint *endpoint; + aws_array_list_get_at(&endpoints_to_release, &endpoint, i); + aws_s3_endpoint_destroy(endpoint); + } + + /* Clean up the array list */ + aws_array_list_clean_up(&endpoints_to_release); + + aws_s3_client_schedule_process_work(client); +} + +static void s_s3_client_schedule_endpoints_cleanup_synced(struct aws_s3_client *client) { + ASSERT_SYNCED_DATA_LOCK_HELD(client); + if (client->synced_data.endpoints_cleanup_task_scheduled) { + return; + } + client->synced_data.endpoints_cleanup_task_scheduled = true; + uint64_t now_ns = 0; + aws_event_loop_current_clock_time(client->process_work_event_loop, &now_ns); + aws_event_loop_schedule_task_future( + client->process_work_event_loop, + &client->synced_data.endpoints_cleanup_task, + now_ns + + aws_timestamp_convert(s_endpoints_cleanup_time_offset_in_s, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); +} + void aws_s3_client_schedule_process_work(struct aws_s3_client *client) { AWS_PRECONDITION(client); @@ -1180,7 +1525,7 @@ static void s_s3_client_process_work_default(struct aws_s3_client *client) { aws_linked_list_init(&meta_request_work_list); /*******************/ - /* Step 1: Move relevant data into thread local memory. 
*/ + /* Step 1: Move relevant data into thread local memory and schedule cleanups */ /*******************/ AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, @@ -1193,6 +1538,17 @@ static void s_s3_client_process_work_default(struct aws_s3_client *client) { client->synced_data.process_work_task_scheduled = false; client->synced_data.process_work_task_in_progress = true; + if (client->synced_data.active) { + s_s3_client_schedule_buffer_pool_trim_synced(client); + s_s3_client_schedule_endpoints_cleanup_synced(client); + } else if (client->synced_data.endpoints_cleanup_task_scheduled) { + client->synced_data.endpoints_cleanup_task_scheduled = false; + /* Cancel the task to run it sync */ + aws_s3_client_unlock_synced_data(client); + aws_event_loop_cancel_task(client->process_work_event_loop, &client->synced_data.endpoints_cleanup_task); + aws_s3_client_lock_synced_data(client); + } + aws_linked_list_swap_contents(&meta_request_work_list, &client->synced_data.pending_meta_request_work); uint32_t num_requests_queued = @@ -1284,28 +1640,33 @@ static void s_s3_client_process_work_default(struct aws_s3_client *client) { uint32_t num_requests_stream_queued_waiting = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_stream_queued_waiting); - uint32_t num_requests_streaming = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_streaming); + + uint32_t num_requests_being_prepared = client->threaded_data.num_requests_being_prepared; + + uint32_t num_requests_streaming_response = + (uint32_t)aws_atomic_load_int(&client->stats.num_requests_streaming_response); uint32_t total_approx_requests = num_requests_network_io + num_requests_stream_queued_waiting + - num_requests_streaming + client->threaded_data.num_requests_being_prepared + + num_requests_streaming_response + num_requests_being_prepared + client->threaded_data.request_queue_size; AWS_LOGF( s_log_level_client_stats, AWS_LS_S3_CLIENT_STATS, "id=%p Requests-in-flight(approx/exact):%d/%d Requests-preparing:%d Requests-queued:%d 
" - "Requests-network(get/put/default/total):%d/%d/%d/%d Requests-streaming-waiting:%d Requests-streaming:%d " + "Requests-network(get/put/default/total):%d/%d/%d/%d Requests-streaming-waiting:%d " + "Requests-streaming-response:%d " " Endpoints(in-table/allocated):%d/%d", (void *)client, total_approx_requests, num_requests_tracked_requests, - client->threaded_data.num_requests_being_prepared, + num_requests_being_prepared, client->threaded_data.request_queue_size, num_auto_ranged_get_network_io, num_auto_ranged_put_network_io, num_auto_default_network_io, num_requests_network_io, num_requests_stream_queued_waiting, - num_requests_streaming, + num_requests_streaming_response, num_endpoints_in_table, num_endpoints_allocated); } @@ -1321,12 +1682,12 @@ static void s_s3_client_process_work_default(struct aws_s3_client *client) { /* This flag should never be set twice. If it was, that means a double-free could occur.*/ AWS_ASSERT(!client->synced_data.finish_destroy); - bool finish_destroy = client->synced_data.active == false && - client->synced_data.start_destroy_executing == false && - client->synced_data.body_streaming_elg_allocated == false && - client->synced_data.process_work_task_scheduled == false && - client->synced_data.process_work_task_in_progress == false && - client->synced_data.num_endpoints_allocated == 0; + bool finish_destroy = + client->synced_data.active == false && client->synced_data.start_destroy_executing == false && + client->synced_data.body_streaming_elg_allocated == false && + client->synced_data.process_work_task_scheduled == false && + client->synced_data.process_work_task_in_progress == false && + client->synced_data.s3express_provider_active == false && client->synced_data.num_endpoints_allocated == 0; client->synced_data.finish_destroy = finish_destroy; @@ -1335,13 +1696,14 @@ static void s_s3_client_process_work_default(struct aws_s3_client *client) { AWS_LS_S3_CLIENT, "id=%p Client shutdown progress: starting_destroy_executing=%d 
body_streaming_elg_allocated=%d " "process_work_task_scheduled=%d process_work_task_in_progress=%d num_endpoints_allocated=%d " - "finish_destroy=%d", + "s3express_provider_active=%d finish_destroy=%d", (void *)client, (int)client->synced_data.start_destroy_executing, (int)client->synced_data.body_streaming_elg_allocated, (int)client->synced_data.process_work_task_scheduled, (int)client->synced_data.process_work_task_in_progress, (int)client->synced_data.num_endpoints_allocated, + (int)client->synced_data.s3express_provider_active, (int)client->synced_data.finish_destroy); } @@ -1360,6 +1722,54 @@ static void s_s3_client_prepare_callback_queue_request( int error_code, void *user_data); +static bool s_s3_client_should_update_meta_request( + struct aws_s3_client *client, + struct aws_s3_meta_request *meta_request, + uint32_t num_requests_in_flight, + const uint32_t max_requests_in_flight, + const uint32_t max_requests_prepare) { + + /* CreateSession has high priority to bypass the checks. */ + if (meta_request->type == AWS_S3_META_REQUEST_TYPE_DEFAULT) { + struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; + if (aws_string_eq_c_str(meta_request_default->operation_name, "CreateSession")) { + return true; + } + } + + /** + * If number of being-prepared + already-prepared-and-queued requests is more than the max that can + * be in the preparation stage. + * Or total number of requests tracked by the client is more than the max tracked ("in flight") + * requests. + * + * We cannot create more requests for this meta request. + */ + if ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) >= + max_requests_prepare) { + return false; + } + if (num_requests_in_flight >= max_requests_in_flight) { + return false; + } + + /* If this particular endpoint doesn't have any known addresses yet, then we don't want to go full speed in + * ramping up requests just yet. 
If there is already enough in the queue for one address (even if those + * aren't for this particular endpoint) we skip over this meta request for now. */ + struct aws_s3_endpoint *endpoint = meta_request->endpoint; + AWS_ASSERT(endpoint != NULL); + AWS_ASSERT(client->vtable->get_host_address_count); + size_t num_known_vips = client->vtable->get_host_address_count( + client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A); + if (num_known_vips == 0 && (client->threaded_data.num_requests_being_prepared + + client->threaded_data.request_queue_size) >= g_min_num_connections) { + return false; + } + + /* Nothing blocks the meta request to create more requests */ + return true; +} + void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { AWS_PRECONDITION(client); @@ -1378,39 +1788,25 @@ void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { const uint32_t num_passes = AWS_ARRAY_SIZE(pass_flags); + aws_s3_buffer_pool_remove_reservation_hold(client->buffer_pool); + for (uint32_t pass_index = 0; pass_index < num_passes; ++pass_index) { - /* While: - * * Number of being-prepared + already-prepared-and-queued requests is less than the max that can be in the - * preparation stage. - * * Total number of requests tracked by the client is less than the max tracked ("in flight") requests. - * * There are meta requests to get requests from. - * - * Then update meta requests to get new requests that can then be prepared (reading from any streams, signing, - * etc.) for sending. + /** + * Iterate through the meta requests to update meta requests and get new requests that can then be prepared ++ * (reading from any streams, signing, etc.) for sending. 
*/ - while ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) < - max_requests_prepare && - num_requests_in_flight < max_requests_in_flight && - !aws_linked_list_empty(&client->threaded_data.meta_requests)) { + while (!aws_linked_list_empty(&client->threaded_data.meta_requests)) { struct aws_linked_list_node *meta_request_node = aws_linked_list_begin(&client->threaded_data.meta_requests); struct aws_s3_meta_request *meta_request = AWS_CONTAINER_OF(meta_request_node, struct aws_s3_meta_request, client_process_work_threaded_data); - struct aws_s3_endpoint *endpoint = meta_request->endpoint; - AWS_ASSERT(endpoint != NULL); - - AWS_ASSERT(client->vtable->get_host_address_count); - size_t num_known_vips = client->vtable->get_host_address_count( - client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A); + if (!s_s3_client_should_update_meta_request( + client, meta_request, num_requests_in_flight, max_requests_in_flight, max_requests_prepare)) { - /* If this particular endpoint doesn't have any known addresses yet, then we don't want to go full speed in - * ramping up requests just yet. If there is already enough in the queue for one address (even if those - * aren't for this particular endpoint) we skip over this meta request for now. */ - if (num_known_vips == 0 && (client->threaded_data.num_requests_being_prepared + - client->threaded_data.request_queue_size) >= g_max_num_connections_per_vip) { + /* Move the meta request to be processed from next loop. */ aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node); aws_linked_list_push_back( &meta_requests_work_remaining, &meta_request->client_process_work_threaded_data.node); @@ -1420,6 +1816,9 @@ void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { struct aws_s3_request *request = NULL; /* Try to grab the next request from the meta request. 
*/ + /* TODO: should we bail out if request fails to update due to mem or + * continue going and hopping that following reqs can fit into mem? + * check if avail space is at least part size? */ bool work_remaining = aws_s3_meta_request_update(meta_request, pass_flags[pass_index], &request); if (work_remaining) { @@ -1482,9 +1881,7 @@ static void s_s3_client_prepare_callback_queue_request( if (error_code != AWS_ERROR_SUCCESS) { s_s3_client_meta_request_finished_request(client, meta_request, request, error_code); - - aws_s3_request_release(request); - request = NULL; + request = aws_s3_request_release(request); } /* BEGIN CRITICAL SECTION */ @@ -1516,14 +1913,15 @@ void aws_s3_client_update_connections_threaded(struct aws_s3_client *client) { struct aws_s3_request *request = aws_s3_client_dequeue_request_threaded(client); const uint32_t max_active_connections = aws_s3_client_get_max_active_connections(client, request->meta_request); - - /* Unless the request is marked "always send", if this meta request has a finish result, then finish the request - * now and release it. */ - if (!request->always_send && aws_s3_meta_request_has_finish_result(request->meta_request)) { + if (request->is_noop) { + /* If request is no-op, finishes and cleans up the request */ + s_s3_client_meta_request_finished_request(client, request->meta_request, request, AWS_ERROR_SUCCESS); + request = aws_s3_request_release(request); + } else if (!request->always_send && aws_s3_meta_request_has_finish_result(request->meta_request)) { + /* Unless the request is marked "always send", if this meta request has a finish result, then finish the + * request now and release it. 
*/ s_s3_client_meta_request_finished_request(client, request->meta_request, request, AWS_ERROR_S3_CANCELED); - - aws_s3_request_release(request); - request = NULL; + request = aws_s3_request_release(request); } else if ( s_s3_client_get_num_requests_network_io(client, request->meta_request->type) < max_active_connections) { s_s3_client_create_connection_for_request(client, request); @@ -1582,9 +1980,9 @@ static void s_s3_client_create_connection_for_request_default( struct aws_http_headers *message_headers = aws_http_message_get_headers(meta_request->initial_request_message); AWS_ASSERT(message_headers); - int get_header_result = aws_http_headers_get(message_headers, g_host_header_name, &host_header_value); - AWS_ASSERT(get_header_result == AWS_OP_SUCCESS); - (void)get_header_result; + int result = aws_http_headers_get(message_headers, g_host_header_name, &host_header_value); + AWS_ASSERT(result == AWS_OP_SUCCESS); + (void)result; if (aws_retry_strategy_acquire_retry_token( client->retry_strategy, &host_header_value, s_s3_client_acquired_retry_token, connection, 0)) { @@ -1693,7 +2091,12 @@ static void s_s3_client_on_acquire_http_connection( error_code, aws_error_str(error_code)); - if (error_code == AWS_IO_DNS_INVALID_NAME) { + if (error_code == AWS_IO_DNS_INVALID_NAME || error_code == AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE) { + /** + * Fall fast without retry + * - Invalid DNS name will not change after retry. + * - TLS negotiation is expensive and retry will not help in most case. + */ goto error_fail; } @@ -1736,8 +2139,11 @@ void aws_s3_client_notify_connection_finished( struct aws_s3_endpoint *endpoint = meta_request->endpoint; AWS_PRECONDITION(endpoint); + if (request->send_data.metrics) { + request->send_data.metrics->crt_info_metrics.error_code = error_code; + } - /* If we're trying to setup a retry... */ + /* If we're trying to set up a retry... 
*/ if (finish_code == AWS_S3_CONNECTION_FINISH_CODE_RETRY) { if (connection->retry_token == NULL) { @@ -1766,11 +2172,13 @@ void aws_s3_client_notify_connection_finished( AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, - "id=%p Client scheduling retry of request %p for meta request %p with token %p.", + "id=%p Client scheduling retry of request %p for meta request %p with token %p with error code %d (%s).", (void *)client, (void *)request, (void *)meta_request, - (void *)connection->retry_token); + (void *)connection->retry_token, + error_code, + aws_error_str(error_code)); enum aws_retry_error_type error_type = AWS_RETRY_ERROR_TYPE_TRANSIENT; @@ -1846,8 +2254,7 @@ reset_connection: } if (connection->request != NULL) { - aws_s3_request_release(connection->request); - connection->request = NULL; + connection->request = aws_s3_request_release(connection->request); } aws_retry_token_release(connection->retry_token); @@ -1980,10 +2387,14 @@ struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new_up const struct aws_s3_upload_resume_token_options *options) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(options); + if (options->part_size > SIZE_MAX) { + aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); + return NULL; + } struct aws_s3_meta_request_resume_token *token = aws_s3_meta_request_resume_token_new(allocator); token->multipart_upload_id = aws_string_new_from_cursor(allocator, &options->upload_id); - token->part_size = options->part_size; + token->part_size = (size_t)options->part_size; token->total_num_parts = options->total_num_parts; token->num_parts_completed = options->num_parts_completed; token->type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; @@ -2012,9 +2423,9 @@ enum aws_s3_meta_request_type aws_s3_meta_request_resume_token_type( return resume_token->type; } -size_t aws_s3_meta_request_resume_token_part_size(struct aws_s3_meta_request_resume_token *resume_token) { +uint64_t aws_s3_meta_request_resume_token_part_size(struct aws_s3_meta_request_resume_token 
*resume_token) { AWS_FATAL_PRECONDITION(resume_token); - return resume_token->part_size; + return (uint64_t)resume_token->part_size; } size_t aws_s3_meta_request_resume_token_total_num_parts(struct aws_s3_meta_request_resume_token *resume_token) { @@ -2036,3 +2447,171 @@ struct aws_byte_cursor aws_s3_meta_request_resume_token_upload_id( return aws_byte_cursor_from_c_str(""); } + +static uint64_t s_upload_timeout_threshold_ns = 5000000000; /* 5 Secs */ +const size_t g_expect_timeout_offset_ms = + 700; /* 0.7 Secs. From experiments on c5n.18xlarge machine for 30 GiB upload, it gave us best performance. */ + +/** + * The upload timeout optimization: explained. + * + * Sometimes, S3 is extremely slow responding to an upload. + * In these cases, it's much faster to cancel and resend the upload, + * vs waiting 5sec for the slow response. + * + * Typically, S3 responds to an upload in 0.2sec after the request is fully received. + * But occasionally (about 0.1%) it takes 5sec to respond. + * In a large 30GiB file upload, you can expect about 4 parts to suffer from + * a slow response. If one of these parts is near the end of the file, + * then we end up sitting around doing nothing for up to 5sec, waiting + * for this final slow upload to complete. + * + * We use the response_first_byte_timeout HTTP option to cancel uploads + * suffering from a slow response. But how should we set it? A fast 100Gbps + * machine definitely wants it! But a slow computer does not. A slow computer + * would be better off waiting 5sec for the response, vs re-uploading the whole request. + * + * The current algorithm: + * 1. Start without a timeout value. After 10 requests completed, we know the average of how long the + * request takes. We decide if it's worth to set a timeout value or not. (If the average of request takes more than + * 5 secs or not) TODO: if the client have different part size, this doesn't make sense + * 2. If it is worth to retry, start with a default timeout value, 1 sec. 
+ * 3. If a request finishes successfully, use the average response_to_first_byte_time + g_expect_timeout_offset_ms as + * our expected timeout value. (TODO: The real expected timeout value should be a P99 of all the requests.) + * 3.1 Adjust the current timeout value against the expected timeout value, via 0.99 * <current timeout> + 0.01 * + * <expected timeout> to get closer to the expected timeout value. + * 4. If request had timed out. We check the timeout rate. + * 4.1 If timeout rate is larger than 0.1%, we increase the timeout value by 100ms (Check the timeout value when the + * request was made, if the updated timeout value is larger than the expected, skip update). + * 4.2 If timeout rate is larger than 1%, we increase the timeout value by 1 secs (If needed). And clear the rate + * to get the exact rate with new timeout value. + * 4.3 Once the timeout value is larger than 5 secs, we stop the process. + * + * Invoked from `s_s3_auto_ranged_put_send_request_finish`. + */ +void aws_s3_client_update_upload_part_timeout( + struct aws_s3_client *client, + struct aws_s3_request *finished_upload_part_request, + int finished_error_code) { + + aws_s3_client_lock_synced_data(client); + struct aws_s3_upload_part_timeout_stats *stats = &client->synced_data.upload_part_stats; + if (stats->stop_timeout) { + /* Timeout was disabled */ + goto unlock; + } + + struct aws_s3_request_metrics *metrics = finished_upload_part_request->send_data.metrics; + size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); + uint64_t current_timeout_ns = + aws_timestamp_convert(current_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); + uint64_t updated_timeout_ns = 0; + uint64_t expect_timeout_offset_ns = + aws_timestamp_convert(g_expect_timeout_offset_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); + + switch (finished_error_code) { + case AWS_ERROR_SUCCESS: + /* We only interested in request succeed */ + stats->num_successful_upload_requests = 
aws_add_u64_saturating(stats->num_successful_upload_requests, 1); + if (stats->num_successful_upload_requests <= 10) { + /* Gether the data */ + uint64_t request_time_ns = + metrics->time_metrics.receive_end_timestamp_ns - metrics->time_metrics.send_start_timestamp_ns; + stats->initial_request_time.sum_ns = + aws_add_u64_saturating(stats->initial_request_time.sum_ns, request_time_ns); + ++stats->initial_request_time.num_samples; + if (stats->num_successful_upload_requests == 10) { + /* Decide we need a timeout or not */ + uint64_t average_request_time_ns = + stats->initial_request_time.sum_ns / stats->initial_request_time.num_samples; + if (average_request_time_ns >= s_upload_timeout_threshold_ns) { + /* We don't need a timeout, as retry will be slower than just wait for the server to response */ + stats->stop_timeout = true; + } else { + /* Start the timeout at 1 secs */ + aws_atomic_store_int(&client->upload_timeout_ms, 1000); + } + } + goto unlock; + } + /* Starts to update timeout on case of succeed */ + stats->timeout_rate_tracking.num_completed = + aws_add_u64_saturating(stats->timeout_rate_tracking.num_completed, 1); + /* Response to first byte is time taken for the first byte data received from the request finished + * sending */ + uint64_t response_to_first_byte_time_ns = + metrics->time_metrics.receive_start_timestamp_ns - metrics->time_metrics.send_end_timestamp_ns; + stats->response_to_first_byte_time.sum_ns = + aws_add_u64_saturating(stats->response_to_first_byte_time.sum_ns, response_to_first_byte_time_ns); + stats->response_to_first_byte_time.num_samples = + aws_add_u64_saturating(stats->response_to_first_byte_time.num_samples, 1); + + uint64_t average_response_to_first_byte_time_ns = + stats->response_to_first_byte_time.sum_ns / stats->response_to_first_byte_time.num_samples; + uint64_t expected_timeout_ns = average_response_to_first_byte_time_ns + expect_timeout_offset_ns; + double timeout_ns_double = (double)current_timeout_ns * 0.99 + 
(double)expected_timeout_ns * 0.01; + updated_timeout_ns = (uint64_t)timeout_ns_double; + break; + + case AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT: + if (stats->num_successful_upload_requests < 10) { + goto unlock; + } + + /* Starts to update timeout on case of timed out */ + stats->timeout_rate_tracking.num_completed = + aws_add_u64_saturating(stats->timeout_rate_tracking.num_completed, 1); + stats->timeout_rate_tracking.num_failed = + aws_add_u64_saturating(stats->timeout_rate_tracking.num_failed, 1); + + uint64_t timeout_threshold = (uint64_t)ceil((double)stats->timeout_rate_tracking.num_completed / 100); + uint64_t warning_threshold = (uint64_t)ceil((double)stats->timeout_rate_tracking.num_completed / 1000); + + if (stats->timeout_rate_tracking.num_failed > timeout_threshold) { + /** + * Restore the rate track, as we are larger than 1%, it goes off the record. + */ + + AWS_LOGF_WARN( + AWS_LS_S3_CLIENT, + "id=%p Client upload part timeout rate is larger than expected, current timeout is %zu, bump it " + "up. Request original timeout is: %zu", + (void *)client, + current_timeout_ms, + finished_upload_part_request->upload_timeout_ms); + stats->timeout_rate_tracking.num_completed = 0; + stats->timeout_rate_tracking.num_failed = 0; + if (finished_upload_part_request->upload_timeout_ms + 1000 > current_timeout_ms) { + /* Update the timeout by adding 1 secs only when it's worth to do so */ + updated_timeout_ns = aws_add_u64_saturating( + current_timeout_ns, aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); + } + } else if (stats->timeout_rate_tracking.num_failed > warning_threshold) { + if (finished_upload_part_request->upload_timeout_ms + 100 > current_timeout_ms) { + /* Only update the timeout by adding 100 ms if the request was made with a longer time out. 
*/ + updated_timeout_ns = aws_add_u64_saturating( + current_timeout_ns, + aws_timestamp_convert(100, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); + } + } + break; + default: + break; + } + + if (updated_timeout_ns != 0) { + if (updated_timeout_ns > s_upload_timeout_threshold_ns) { + /* Stops timeout, as wait for server to response will be faster to set our own timeout */ + stats->stop_timeout = true; + /* Unset the upload_timeout */ + updated_timeout_ns = 0; + } + /* Apply the updated timeout */ + aws_atomic_store_int( + &client->upload_timeout_ms, + (size_t)aws_timestamp_convert(updated_timeout_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL)); + } + +unlock: + aws_s3_client_unlock_synced_data(client); +} diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c b/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c index 60c80d4b11b..ba7e68c1205 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c @@ -7,16 +7,16 @@ #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" #include <aws/common/string.h> -#include <aws/io/stream.h> /* Objects with size smaller than the constant below are bypassed as S3 CopyObject instead of multipart copy */ -static const size_t s_multipart_copy_minimum_object_size = 1L * 1024L * 1024L * 1024L; +static const size_t s_multipart_copy_minimum_object_size = GB_TO_BYTES(1); -static const size_t s_etags_initial_capacity = 16; -static const struct aws_byte_cursor s_upload_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("UploadId"); static const size_t s_complete_multipart_upload_init_body_size_bytes = 512; static const size_t s_abort_multipart_upload_init_body_size_bytes = 512; +/* TODO: make this configurable or at least expose it. 
*/ +const size_t s_min_copy_part_size = MB_TO_BYTES(128); + static const struct aws_byte_cursor s_create_multipart_upload_copy_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"), @@ -30,7 +30,7 @@ static bool s_s3_copy_object_update( uint32_t flags, struct aws_s3_request **out_request); -static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request); +static struct aws_future_void *s_s3_copy_object_prepare_request(struct aws_s3_request *request); static void s_s3_copy_object_request_finished( struct aws_s3_meta_request *meta_request, @@ -39,7 +39,7 @@ static void s_s3_copy_object_request_finished( static struct aws_s3_meta_request_vtable s_s3_copy_object_vtable = { .update = s_s3_copy_object_update, - .send_request_finish = aws_s3_meta_request_send_request_finish_handle_async_error, + .send_request_finish = aws_s3_meta_request_send_request_finish_default, .prepare_request = s_s3_copy_object_prepare_request, .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default, .sign_request = aws_s3_meta_request_sign_request_default, @@ -67,7 +67,6 @@ struct aws_s3_meta_request *aws_s3_meta_request_copy_object_new( const size_t UNKNOWN_CONTENT_LENGTH = 0; const int UNKNOWN_NUM_PARTS = 0; - /* TODO Handle and test multipart copy */ if (aws_s3_meta_request_init_base( allocator, client, @@ -82,7 +81,7 @@ struct aws_s3_meta_request *aws_s3_meta_request_copy_object_new( } aws_array_list_init_dynamic( - ©_object->synced_data.etag_list, allocator, s_etags_initial_capacity, sizeof(struct aws_string *)); + ©_object->synced_data.part_list, allocator, 0, sizeof(struct aws_s3_mpu_part_info *)); copy_object->synced_data.content_length = UNKNOWN_CONTENT_LENGTH; copy_object->synced_data.total_num_parts = UNKNOWN_NUM_PARTS; @@ -102,14 +101,15 @@ static void 
s_s3_meta_request_copy_object_destroy(struct aws_s3_meta_request *me aws_string_destroy(copy_object->upload_id); copy_object->upload_id = NULL; - for (size_t etag_index = 0; etag_index < aws_array_list_length(©_object->synced_data.etag_list); ++etag_index) { - struct aws_string *etag = NULL; - - aws_array_list_get_at(©_object->synced_data.etag_list, &etag, etag_index); - aws_string_destroy(etag); + for (size_t part_index = 0; part_index < aws_array_list_length(©_object->synced_data.part_list); ++part_index) { + struct aws_s3_mpu_part_info *part = NULL; + aws_array_list_get_at(©_object->synced_data.part_list, &part, part_index); + aws_string_destroy(part->etag); + aws_byte_buf_clean_up(&part->checksum_base64); + aws_mem_release(meta_request->allocator, part); } - aws_array_list_clean_up(©_object->synced_data.etag_list); + aws_array_list_clean_up(©_object->synced_data.part_list); aws_http_headers_release(copy_object->synced_data.needed_response_headers); aws_mem_release(meta_request->allocator, copy_object); } @@ -136,7 +136,8 @@ static bool s_s3_copy_object_update( request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE, - 0, + AWS_S3_REQUEST_TYPE_HEAD_OBJECT, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); copy_object->synced_data.head_object_sent = true; @@ -155,7 +156,8 @@ static bool s_s3_copy_object_update( request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS, - 1, + AWS_S3_REQUEST_TYPE_COPY_OBJECT, + 1 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); AWS_LOGF_DEBUG( @@ -182,7 +184,8 @@ static bool s_s3_copy_object_update( request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD, - 0, + AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); copy_object->synced_data.create_multipart_upload_sent = true; @@ -212,11 +215,10 @@ static bool s_s3_copy_object_update( request = 
aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY, - 0, + AWS_S3_REQUEST_TYPE_UPLOAD_PART_COPY, + copy_object->threaded_update_data.next_part_number, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); - request->part_number = copy_object->threaded_update_data.next_part_number; - ++copy_object->threaded_update_data.next_part_number; ++copy_object->synced_data.num_parts_sent; @@ -241,7 +243,8 @@ static bool s_s3_copy_object_update( request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD, - 0, + AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); copy_object->synced_data.complete_multipart_upload_sent = true; @@ -294,7 +297,8 @@ static bool s_s3_copy_object_update( request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD, - 0, + AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD, + 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_ALWAYS_SEND); copy_object->synced_data.abort_multipart_upload_sent = true; @@ -314,9 +318,13 @@ has_work_remaining: work_remaining = true; no_work_remaining: + /* If some events are still being delivered to caller, then wait for those to finish */ + if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) { + work_remaining = true; + } if (!work_remaining) { - aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS); + aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK); } aws_s3_meta_request_unlock_synced_data(meta_request); @@ -333,7 +341,8 @@ no_work_remaining: } /* Given a request, prepare it for sending based on its description. 
*/ -static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request) { +static struct aws_future_void *s_s3_copy_object_prepare_request(struct aws_s3_request *request) { + struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_copy_object *copy_object = meta_request->impl; @@ -342,6 +351,7 @@ static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_req aws_s3_meta_request_lock_synced_data(meta_request); struct aws_http_message *message = NULL; + bool success = false; switch (request->request_tag) { @@ -372,25 +382,30 @@ static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_req part_size_uint64); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return AWS_OP_ERR; + goto finish; } - size_t part_size = (size_t)part_size_uint64; - - const size_t MIN_PART_SIZE = 64L * 1024L * 1024L; /* minimum partition size */ - if (part_size < MIN_PART_SIZE) { - part_size = MIN_PART_SIZE; + uint64_t max_part_size = GB_TO_BYTES((uint64_t)5); + if (max_part_size > SIZE_MAX) { + max_part_size = SIZE_MAX; } + uint32_t num_parts = 0; + size_t part_size = 0; - uint32_t num_parts = (uint32_t)(copy_object->synced_data.content_length / part_size); - - if ((copy_object->synced_data.content_length % part_size) > 0) { - ++num_parts; - } + aws_s3_calculate_optimal_mpu_part_size_and_num_parts( + copy_object->synced_data.content_length, s_min_copy_part_size, max_part_size, &part_size, &num_parts); copy_object->synced_data.total_num_parts = num_parts; copy_object->synced_data.part_size = part_size; + /* Fill part_list */ + aws_array_list_ensure_capacity(©_object->synced_data.part_list, num_parts); + while (aws_array_list_length(©_object->synced_data.part_list) < num_parts) { + struct aws_s3_mpu_part_info *part = + aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_mpu_part_info)); + aws_array_list_push_back(©_object->synced_data.part_list, 
&part); + } + AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "Starting multi-part Copy using part size=%zu, total_num_parts=%zu", @@ -410,6 +425,7 @@ static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_req case AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY: { /* Create a new uploadPartCopy message to upload a part. */ /* compute sub-request range */ + /* note that range-end is inclusive */ uint64_t range_start = (request->part_number - 1) * copy_object->synced_data.part_size; uint64_t range_end = range_start + copy_object->synced_data.part_size - 1; if (range_end >= copy_object->synced_data.content_length) { @@ -459,8 +475,7 @@ static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_req meta_request->initial_request_message, &request->request_body, copy_object->upload_id, - ©_object->synced_data.etag_list, - NULL, + ©_object->synced_data.part_list, AWS_SCA_NONE); break; @@ -498,13 +513,14 @@ static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_req "id=%p Could not allocate message for request with tag %d for CopyObject meta request.", (void *)meta_request, request->request_tag); - goto message_create_failed; + goto finish; } aws_s3_request_setup_send_data(request, message); aws_http_message_release(message); + /* Success! 
*/ AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Prepared request %p for part %d", @@ -512,42 +528,36 @@ static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_req (void *)request, request->part_number); - return AWS_OP_SUCCESS; + success = true; -message_create_failed: - - return AWS_OP_ERR; +finish:; + struct aws_future_void *future = aws_future_void_new(meta_request->allocator); + if (success) { + aws_future_void_set_result(future); + } else { + aws_future_void_set_error(future, aws_last_error_or_unknown()); + } + return future; } /* For UploadPartCopy requests, etag is sent in the request body, within XML entity quotes */ static struct aws_string *s_etag_new_from_upload_part_copy_response( struct aws_allocator *allocator, struct aws_byte_buf *response_body) { - struct aws_string *etag = NULL; - - struct aws_byte_cursor response_body_cursor = aws_byte_cursor_from_buf(response_body); - struct aws_string *etag_within_xml_quotes = - aws_xml_get_top_level_tag(allocator, &g_etag_header_name, &response_body_cursor); + struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(response_body); + struct aws_byte_cursor etag_within_xml_quotes = {0}; + const char *xml_path[] = {"CopyPartResult", "ETag", NULL}; + aws_xml_get_body_at_path(allocator, xml_doc, xml_path, &etag_within_xml_quotes); - struct aws_byte_buf etag_within_quotes_byte_buf; - AWS_ZERO_STRUCT(etag_within_quotes_byte_buf); - replace_quote_entities(allocator, etag_within_xml_quotes, &etag_within_quotes_byte_buf); + struct aws_byte_buf etag_within_quotes_byte_buf = aws_replace_quote_entities(allocator, etag_within_xml_quotes); - /* Remove the quotes surrounding the etag. 
*/ - struct aws_byte_cursor etag_within_quotes_byte_cursor = aws_byte_cursor_from_buf(&etag_within_quotes_byte_buf); - if (etag_within_quotes_byte_cursor.len >= 2 && etag_within_quotes_byte_cursor.ptr[0] == '"' && - etag_within_quotes_byte_cursor.ptr[etag_within_quotes_byte_cursor.len - 1] == '"') { + struct aws_string *stripped_etag = + aws_strip_quotes(allocator, aws_byte_cursor_from_buf(&etag_within_quotes_byte_buf)); - aws_byte_cursor_advance(&etag_within_quotes_byte_cursor, 1); - --etag_within_quotes_byte_cursor.len; - } - - etag = aws_string_new_from_cursor(allocator, &etag_within_quotes_byte_cursor); aws_byte_buf_clean_up(&etag_within_quotes_byte_buf); - aws_string_destroy(etag_within_xml_quotes); - return etag; + return stripped_etag; } static void s_s3_copy_object_request_finished( @@ -561,7 +571,6 @@ static void s_s3_copy_object_request_finished( struct aws_s3_copy_object *copy_object = meta_request->impl; aws_s3_meta_request_lock_synced_data(meta_request); - switch (request->request_tag) { case AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE: { @@ -601,6 +610,8 @@ static void s_s3_copy_object_request_finished( /* Copy all the response headers from this request. */ copy_http_headers(request->send_data.response_headers, final_response_headers); + /* Invoke the callback without lock */ + aws_s3_meta_request_unlock_synced_data(meta_request); /* Notify the user of the headers. 
*/ if (meta_request->headers_callback( meta_request, @@ -611,12 +622,23 @@ static void s_s3_copy_object_request_finished( error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; + /* Grab the lock again after the callback */ + aws_s3_meta_request_lock_synced_data(meta_request); aws_http_headers_release(final_response_headers); } /* Signals completion of the meta request */ if (error_code == AWS_ERROR_SUCCESS) { + + /* Send progress_callback for delivery on io_event_loop thread */ + if (meta_request->progress_callback != NULL) { + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; + event.u.progress.info.bytes_transferred = copy_object->synced_data.content_length; + event.u.progress.info.content_length = copy_object->synced_data.content_length; + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); + } + copy_object->synced_data.copy_request_bypass_completed = true; } else { /* Bypassed CopyObject request failed */ @@ -643,13 +665,14 @@ static void s_s3_copy_object_request_finished( } } - struct aws_byte_cursor buffer_byte_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body); + struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); /* Find the upload id for this multipart upload. */ - struct aws_string *upload_id = - aws_xml_get_top_level_tag(meta_request->allocator, &s_upload_id, &buffer_byte_cursor); + struct aws_byte_cursor upload_id = {0}; + const char *xml_path[] = {"InitiateMultipartUploadResult", "UploadId", NULL}; + aws_xml_get_body_at_path(meta_request->allocator, xml_doc, xml_path, &upload_id); - if (upload_id == NULL) { + if (upload_id.len == 0) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find upload-id in create-multipart-upload response", @@ -659,7 +682,7 @@ static void s_s3_copy_object_request_finished( error_code = AWS_ERROR_S3_MISSING_UPLOAD_ID; } else { /* Store the multipart upload id. 
*/ - copy_object->upload_id = upload_id; + copy_object->upload_id = aws_string_new_from_cursor(meta_request->allocator, &upload_id); } } @@ -696,22 +719,20 @@ static void s_s3_copy_object_request_finished( AWS_ASSERT(etag != NULL); ++copy_object->synced_data.num_parts_successful; + + /* Send progress_callback for delivery on io_event_loop thread. */ if (meta_request->progress_callback != NULL) { - struct aws_s3_meta_request_progress progress = { - .bytes_transferred = copy_object->synced_data.part_size, - .content_length = copy_object->synced_data.content_length}; - meta_request->progress_callback(meta_request, &progress, meta_request->user_data); + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; + event.u.progress.info.bytes_transferred = copy_object->synced_data.part_size; + event.u.progress.info.content_length = copy_object->synced_data.content_length; + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } - struct aws_string *null_etag = NULL; - /* ETags need to be associated with their part number, so we keep the etag indices consistent with - * part numbers. This means we may have to add padding to the list in the case that parts finish out - * of order. 
*/ - while (aws_array_list_length(©_object->synced_data.etag_list) < part_number) { - int push_back_result = aws_array_list_push_back(©_object->synced_data.etag_list, &null_etag); - AWS_FATAL_ASSERT(push_back_result == AWS_OP_SUCCESS); - } - aws_array_list_set_at(©_object->synced_data.etag_list, &etag, part_index); + struct aws_s3_mpu_part_info *part = NULL; + aws_array_list_get_at(©_object->synced_data.part_list, &part, part_index); + AWS_ASSERT(part != NULL); + part->etag = etag; + } else { ++copy_object->synced_data.num_parts_failed; aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); @@ -732,29 +753,27 @@ static void s_s3_copy_object_request_finished( */ copy_http_headers(copy_object->synced_data.needed_response_headers, final_response_headers); - struct aws_byte_cursor response_body_cursor = - aws_byte_cursor_from_buf(&request->send_data.response_body); + struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); /* Grab the ETag for the entire object, and set it as a header. 
*/ - struct aws_string *etag_header_value = - aws_xml_get_top_level_tag(meta_request->allocator, &g_etag_header_name, &response_body_cursor); - - if (etag_header_value != NULL) { - struct aws_byte_buf etag_header_value_byte_buf; - AWS_ZERO_STRUCT(etag_header_value_byte_buf); - - replace_quote_entities(meta_request->allocator, etag_header_value, &etag_header_value_byte_buf); + struct aws_byte_cursor etag_header_value = {0}; + const char *xml_path[] = {"CompleteMultipartUploadResult", "ETag", NULL}; + aws_xml_get_body_at_path(meta_request->allocator, xml_doc, xml_path, &etag_header_value); + if (etag_header_value.len > 0) { + struct aws_byte_buf etag_header_value_byte_buf = + aws_replace_quote_entities(meta_request->allocator, etag_header_value); aws_http_headers_set( final_response_headers, g_etag_header_name, aws_byte_cursor_from_buf(&etag_header_value_byte_buf)); - aws_string_destroy(etag_header_value); aws_byte_buf_clean_up(&etag_header_value_byte_buf); } /* Notify the user of the headers. 
*/ + /* Invoke the callback without lock */ + aws_s3_meta_request_unlock_synced_data(meta_request); if (meta_request->headers_callback( meta_request, final_response_headers, @@ -764,6 +783,8 @@ static void s_s3_copy_object_request_finished( error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; + /* Grab the lock again after the callback */ + aws_s3_meta_request_lock_synced_data(meta_request); aws_http_headers_release(final_response_headers); } @@ -783,5 +804,8 @@ static void s_s3_copy_object_request_finished( break; } } + + aws_s3_request_finish_up_metrics_synced(request, meta_request); + aws_s3_meta_request_unlock_synced_data(meta_request); } diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c b/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c index 5bc39a7316f..0e759ff610e 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c @@ -6,10 +6,15 @@ #include <aws/common/string.h> #include <inttypes.h> -#ifdef _MSC_VER -/* sscanf warning (not currently scanning for strings) */ -# pragma warning(disable : 4996) -#endif +/* Data for aws_s3_meta_request_default's vtable->prepare_request() job */ +struct aws_s3_default_prepare_request_job { + struct aws_allocator *allocator; + struct aws_s3_request *request; + /* async step: read request body */ + struct aws_future_bool *step1_read_body; + /* future to set when this whole job completes */ + struct aws_future_void *on_complete; +}; static void s_s3_meta_request_default_destroy(struct aws_s3_meta_request *meta_request); @@ -18,9 +23,13 @@ static bool s_s3_meta_request_default_update( uint32_t flags, struct aws_s3_request **out_request); -static int s_s3_meta_request_default_prepare_request( - struct aws_s3_meta_request *meta_request, - struct aws_s3_request *request); +static struct aws_future_void *s_s3_default_prepare_request(struct aws_s3_request *request); + 
+static void s_s3_default_prepare_request_on_read_done(void *user_data); + +static void s_s3_default_prepare_request_finish( + struct aws_s3_default_prepare_request_job *request_prep, + int error_code); static void s_s3_meta_request_default_request_finished( struct aws_s3_meta_request *meta_request, @@ -29,8 +38,8 @@ static void s_s3_meta_request_default_request_finished( static struct aws_s3_meta_request_vtable s_s3_meta_request_default_vtable = { .update = s_s3_meta_request_default_update, - .send_request_finish = aws_s3_meta_request_send_request_finish_handle_async_error, - .prepare_request = s_s3_meta_request_default_prepare_request, + .send_request_finish = aws_s3_meta_request_send_request_finish_default, + .prepare_request = s_s3_default_prepare_request, .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default, .sign_request = aws_s3_meta_request_sign_request_default, .finished_request = s_s3_meta_request_default_request_finished, @@ -42,9 +51,11 @@ static struct aws_s3_meta_request_vtable s_s3_meta_request_default_vtable = { struct aws_s3_meta_request *aws_s3_meta_request_default_new( struct aws_allocator *allocator, struct aws_s3_client *client, + enum aws_s3_request_type request_type, uint64_t content_length, bool should_compute_content_md5, const struct aws_s3_meta_request_options *options) { + AWS_PRECONDITION(allocator); AWS_PRECONDITION(client); AWS_PRECONDITION(options); @@ -94,8 +105,25 @@ struct aws_s3_meta_request *aws_s3_meta_request_default_new( } meta_request_default->content_length = (size_t)content_length; + meta_request_default->request_type = request_type; + + /* Try to get operation name. + * When internal aws-c-s3 code creates a default meta-request, + * a valid request_type is always passed in, and we can get its operation name. + * When external users create a default meta-request, they may have provided + * operation name in the options. 
*/ + const char *operation_name = aws_s3_request_type_operation_name(request_type); + if (operation_name[0] != '\0') { + meta_request_default->operation_name = aws_string_new_from_c_str(allocator, operation_name); + } else if (options->operation_name.len != 0) { + meta_request_default->operation_name = aws_string_new_from_cursor(allocator, &options->operation_name); + } - AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Created new Default Meta Request.", (void *)meta_request_default); + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p Created new Default Meta Request. operation=%s", + (void *)meta_request_default, + meta_request_default->operation_name ? aws_string_c_str(meta_request_default->operation_name) : "?"); return &meta_request_default->base; } @@ -105,6 +133,7 @@ static void s_s3_meta_request_default_destroy(struct aws_s3_meta_request *meta_r AWS_PRECONDITION(meta_request->impl); struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; + aws_string_destroy(meta_request_default->operation_name); aws_mem_release(meta_request->allocator, meta_request_default); } @@ -134,7 +163,19 @@ static bool s_s3_meta_request_default_update( goto has_work_remaining; } - request = aws_s3_request_new(meta_request, 0, 1, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); + request = aws_s3_request_new( + meta_request, + 0 /*request_tag*/, + meta_request_default->request_type, + 1 /*part_number*/, + AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); + + /* Default meta-request might know operation name, despite not knowing valid request_type. + * If so, pass the name along. 
*/ + if (request->operation_name == NULL && meta_request_default->operation_name != NULL) { + request->operation_name = + aws_string_new_from_string(meta_request->allocator, meta_request_default->operation_name); + } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, @@ -183,6 +224,10 @@ static bool s_s3_meta_request_default_update( work_remaining = true; no_work_remaining: + /* If some events are still being delivered to caller, then wait for those to finish */ + if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) { + work_remaining = true; + } if (!work_remaining) { aws_s3_meta_request_set_success_synced( @@ -208,20 +253,82 @@ static bool s_s3_meta_request_default_update( } /* Given a request, prepare it for sending based on its description. */ -static int s_s3_meta_request_default_prepare_request( - struct aws_s3_meta_request *meta_request, - struct aws_s3_request *request) { +static struct aws_future_void *s_s3_default_prepare_request(struct aws_s3_request *request) { + AWS_PRECONDITION(request); + + struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; AWS_PRECONDITION(meta_request_default); + struct aws_future_void *asyncstep_prepare_request = aws_future_void_new(request->allocator); + + /* Store data for async job */ + struct aws_s3_default_prepare_request_job *request_prep = + aws_mem_calloc(request->allocator, 1, sizeof(struct aws_s3_default_prepare_request_job)); + request_prep->allocator = request->allocator; + request_prep->request = request; + request_prep->on_complete = aws_future_void_acquire(asyncstep_prepare_request); + if (meta_request_default->content_length > 0 && request->num_times_prepared == 0) { aws_byte_buf_init(&request->request_body, meta_request->allocator, meta_request_default->content_length); - if (aws_s3_meta_request_read_body(meta_request, &request->request_body)) { - return 
AWS_OP_ERR; - } + /* Kick off the async read */ + request_prep->step1_read_body = + aws_s3_meta_request_read_body(meta_request, 0 /*offset*/, &request->request_body); + aws_future_bool_register_callback( + request_prep->step1_read_body, s_s3_default_prepare_request_on_read_done, request_prep); + } else { + /* Don't need to read body, jump directly to the last step */ + s_s3_default_prepare_request_finish(request_prep, AWS_ERROR_SUCCESS); + } + + return asyncstep_prepare_request; +} + +/* Completion callback for reading the body stream */ +static void s_s3_default_prepare_request_on_read_done(void *user_data) { + + struct aws_s3_default_prepare_request_job *request_prep = user_data; + struct aws_s3_request *request = request_prep->request; + struct aws_s3_meta_request *meta_request = request->meta_request; + + int error_code = aws_future_bool_get_error(request_prep->step1_read_body); + + if (error_code != AWS_OP_SUCCESS) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Failed reading request body, error %d (%s)", + (void *)meta_request, + error_code, + aws_error_str(error_code)); + goto finish; + } + + if (request->request_body.len < request->request_body.capacity) { + error_code = AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH; + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Request body is smaller than 'Content-Length' header said it would be", + (void *)meta_request); + goto finish; + } + +finish: + s_s3_default_prepare_request_finish(request_prep, error_code); +} + +/* Finish async preparation of the request */ +static void s_s3_default_prepare_request_finish( + struct aws_s3_default_prepare_request_job *request_prep, + int error_code) { + + struct aws_s3_request *request = request_prep->request; + struct aws_s3_meta_request *meta_request = request->meta_request; + + if (error_code != AWS_ERROR_SUCCESS) { + goto finish; } struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_all_headers( @@ -251,7 +358,16 @@ static int 
s_s3_meta_request_default_prepare_request( AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Meta Request prepared request %p", (void *)meta_request, (void *)request); - return AWS_OP_SUCCESS; +finish: + if (error_code == AWS_ERROR_SUCCESS) { + aws_future_void_set_result(request_prep->on_complete); + } else { + aws_future_void_set_error(request_prep->on_complete, error_code); + } + + aws_future_bool_release(request_prep->step1_read_body); + aws_future_void_release(request_prep->on_complete); + aws_mem_release(request_prep->allocator, request_prep); } static void s_s3_meta_request_default_request_finished( @@ -285,13 +401,40 @@ static void s_s3_meta_request_default_request_finished( meta_request_default->synced_data.cached_response_status = request->send_data.response_status; meta_request_default->synced_data.request_completed = true; meta_request_default->synced_data.request_error_code = error_code; + bool finishing_metrics = true; if (error_code == AWS_ERROR_SUCCESS) { + /* Send progress_callback for delivery on io_event_loop thread. + * For default meta-requests, we invoke the progress_callback once, after the sole HTTP request completes. + * This is simpler than reporting incremental progress as the response body is received, + * or the request body is streamed out, since then we'd also need to handle retries that reset + * progress back to 0% (our existing API only lets us report forward progress). 
*/ + if (meta_request->progress_callback != NULL) { + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; + if (meta_request->type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) { + /* For uploads, report request body size */ + event.u.progress.info.bytes_transferred = request->request_body.len; + event.u.progress.info.content_length = request->request_body.len; + } else { + /* For anything else, report response body size */ + event.u.progress.info.bytes_transferred = request->send_data.response_body.len; + event.u.progress.info.content_length = request->send_data.response_body.len; + } + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); + } + aws_s3_meta_request_stream_response_body_synced(meta_request, request); + /* The body of the request is queued to be streamed, don't record the end timestamp for the request + * yet. */ + finishing_metrics = false; } else { aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } + if (finishing_metrics) { + aws_s3_request_finish_up_metrics_synced(request, meta_request); + } + aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c b/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c index 74075ccb8af..c8048cb73a7 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c @@ -3,39 +3,28 @@ * SPDX-License-Identifier: Apache-2.0. 
*/ -#include "aws/s3/private/s3_auto_ranged_get.h" -#include "aws/s3/private/s3_auto_ranged_put.h" #include "aws/s3/private/s3_client_impl.h" -#include "aws/s3/private/s3_default_meta_request.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include <aws/auth/credentials.h> #include <aws/common/assert.h> -#include <aws/common/atomics.h> -#include <aws/common/clock.h> #include <aws/common/device_random.h> -#include <aws/common/environment.h> #include <aws/common/string.h> -#include <aws/common/system_info.h> #include <aws/http/connection.h> #include <aws/http/connection_manager.h> -#include <aws/http/request_response.h> #include <aws/io/channel_bootstrap.h> #include <aws/io/event_loop.h> #include <aws/io/host_resolver.h> -#include <aws/io/retry_strategy.h> #include <aws/io/socket.h> -#include <aws/io/stream.h> #include <aws/io/tls_channel_handler.h> #include <aws/io/uri.h> #include <inttypes.h> -#include <math.h> static const uint32_t s_connection_timeout_ms = 3000; -static const uint16_t s_http_port = 80; -static const uint16_t s_https_port = 443; +static const uint32_t s_http_port = 80; +static const uint32_t s_https_port = 443; static void s_s3_endpoint_on_host_resolver_address_resolved( struct aws_host_resolver *resolver, @@ -50,7 +39,7 @@ static struct aws_http_connection_manager *s_s3_endpoint_create_http_connection_ struct aws_client_bootstrap *client_bootstrap, const struct aws_tls_connection_options *tls_connection_options, uint32_t max_connections, - uint16_t port, + uint32_t port, const struct aws_http_proxy_config *proxy_config, const struct proxy_env_var_settings *proxy_ev_settings, uint32_t connect_timeout_ms, @@ -59,8 +48,6 @@ static struct aws_http_connection_manager *s_s3_endpoint_create_http_connection_ static void s_s3_endpoint_http_connection_manager_shutdown_callback(void *user_data); -static void s_s3_endpoint_ref_count_zero(struct aws_s3_endpoint *endpoint); - static void s_s3_endpoint_acquire(struct 
aws_s3_endpoint *endpoint, bool already_holding_lock); static void s_s3_endpoint_release(struct aws_s3_endpoint *endpoint); @@ -134,8 +121,6 @@ struct aws_s3_endpoint *aws_s3_endpoint_new( error_cleanup: - aws_string_destroy(options->host_name); - aws_mem_release(allocator, endpoint); return NULL; @@ -147,7 +132,7 @@ static struct aws_http_connection_manager *s_s3_endpoint_create_http_connection_ struct aws_client_bootstrap *client_bootstrap, const struct aws_tls_connection_options *tls_connection_options, uint32_t max_connections, - uint16_t port, + uint32_t port, const struct aws_http_proxy_config *proxy_config, const struct proxy_env_var_settings *proxy_ev_settings, uint32_t connect_timeout_ms, @@ -257,7 +242,6 @@ static void s_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already aws_s3_client_lock_synced_data(endpoint->client); } - AWS_ASSERT(endpoint->client_synced_data.ref_count > 0); ++endpoint->client_synced_data.ref_count; if (!already_holding_lock) { @@ -278,29 +262,32 @@ static void s_s3_endpoint_release(struct aws_s3_endpoint *endpoint) { /* BEGIN CRITICAL SECTION */ aws_s3_client_lock_synced_data(endpoint->client); - bool should_destroy = (endpoint->client_synced_data.ref_count == 1); + bool should_destroy = endpoint->client_synced_data.ref_count == 1 && !endpoint->client->synced_data.active; if (should_destroy) { aws_hash_table_remove(&endpoint->client->synced_data.endpoints, endpoint->host_name, NULL, NULL); - } else { - --endpoint->client_synced_data.ref_count; } + --endpoint->client_synced_data.ref_count; aws_s3_client_unlock_synced_data(endpoint->client); /* END CRITICAL SECTION */ if (should_destroy) { - /* The endpoint may have async cleanup to do (connection manager). + /* Do a sync cleanup since client is getting destroyed to avoid any cleanup delay. + * The endpoint may have async cleanup to do (connection manager). * When that's all done we'll invoke a completion callback. 
* Since it's a crime to hold a lock while invoking a callback, - * we make sure that we've released the client's lock before proceeding... */ - s_s3_endpoint_ref_count_zero(endpoint); + * we make sure that we've released the client's lock before proceeding... + */ + aws_s3_endpoint_destroy(endpoint); } } -static void s_s3_endpoint_ref_count_zero(struct aws_s3_endpoint *endpoint) { +void aws_s3_endpoint_destroy(struct aws_s3_endpoint *endpoint) { AWS_PRECONDITION(endpoint); AWS_PRECONDITION(endpoint->http_connection_manager); + AWS_FATAL_ASSERT(endpoint->client_synced_data.ref_count == 0); + struct aws_http_connection_manager *http_connection_manager = endpoint->http_connection_manager; endpoint->http_connection_manager = NULL; diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c b/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c index 117bd983799..767900d88f7 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c @@ -54,68 +54,67 @@ struct fs_parser_wrapper { }; /* invoked when the ListBucketResult/Contents node is iterated. 
*/ -static bool s_on_contents_node(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) { +static int s_on_contents_node(struct aws_xml_node *node, void *user_data) { struct fs_parser_wrapper *fs_wrapper = user_data; struct aws_s3_object_info *fs_info = &fs_wrapper->fs_info; /* for each Contents node, get the info from it and send it off as an object we've encountered */ - struct aws_byte_cursor node_name; - aws_xml_node_get_name(node, &node_name); + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ETag")) { - return aws_xml_node_as_body(parser, node, &fs_info->e_tag) == AWS_OP_SUCCESS; + return aws_xml_node_as_body(node, &fs_info->e_tag); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Key")) { - return aws_xml_node_as_body(parser, node, &fs_info->key) == AWS_OP_SUCCESS; + return aws_xml_node_as_body(node, &fs_info->key); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "LastModified")) { struct aws_byte_cursor date_cur; - if (aws_xml_node_as_body(parser, node, &date_cur) == AWS_OP_SUCCESS) { - aws_date_time_init_from_str_cursor(&fs_info->last_modified, &date_cur, AWS_DATE_FORMAT_ISO_8601); - return true; + if (aws_xml_node_as_body(node, &date_cur) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; } - return false; + if (aws_date_time_init_from_str_cursor(&fs_info->last_modified, &date_cur, AWS_DATE_FORMAT_ISO_8601)) { + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Size")) { struct aws_byte_cursor size_cur; + if (aws_xml_node_as_body(node, &size_cur) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; + } - if (aws_xml_node_as_body(parser, node, &size_cur) == AWS_OP_SUCCESS) { - if (aws_byte_cursor_utf8_parse_u64(size_cur, &fs_info->size)) { - return false; - } - return true; + if (aws_byte_cursor_utf8_parse_u64(size_cur, &fs_info->size) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; } + + return AWS_OP_SUCCESS; } - return 
true; + return AWS_OP_SUCCESS; } /* invoked when the ListBucketResult/CommonPrefixes node is iterated. */ -static bool s_on_common_prefixes_node(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) { +static int s_on_common_prefixes_node(struct aws_xml_node *node, void *user_data) { struct fs_parser_wrapper *fs_wrapper = user_data; - struct aws_byte_cursor node_name; - aws_xml_node_get_name(node, &node_name); + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Prefix")) { - return aws_xml_node_as_body(parser, node, &fs_wrapper->fs_info.prefix) == AWS_OP_SUCCESS; + return aws_xml_node_as_body(node, &fs_wrapper->fs_info.prefix); } - return true; + return AWS_OP_SUCCESS; } -static bool s_on_list_bucket_result_node_encountered( - struct aws_xml_parser *parser, - struct aws_xml_node *node, - void *user_data) { +static int s_on_list_bucket_result_node_encountered(struct aws_xml_node *node, void *user_data) { struct aws_s3_operation_data *operation_data = user_data; - struct aws_byte_cursor node_name; - aws_xml_node_get_name(node, &node_name); + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct fs_parser_wrapper fs_wrapper; AWS_ZERO_STRUCT(fs_wrapper); @@ -124,30 +123,23 @@ static bool s_on_list_bucket_result_node_encountered( fs_wrapper.allocator = operation_data->allocator; /* this will traverse the current Contents node, get the metadata necessary to construct * an instance of fs_info so we can invoke the callback on it. This happens once per object. 
*/ - bool ret_val = aws_xml_node_traverse(parser, node, s_on_contents_node, &fs_wrapper) == AWS_OP_SUCCESS; + if (aws_xml_node_traverse(node, s_on_contents_node, &fs_wrapper) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; + } if (operation_data->prefix && !fs_wrapper.fs_info.prefix.len) { fs_wrapper.fs_info.prefix = aws_byte_cursor_from_string(operation_data->prefix); } - struct aws_byte_buf trimmed_etag; - AWS_ZERO_STRUCT(trimmed_etag); + struct aws_byte_buf trimmed_etag = aws_replace_quote_entities(fs_wrapper.allocator, fs_wrapper.fs_info.e_tag); + fs_wrapper.fs_info.e_tag = aws_byte_cursor_from_buf(&trimmed_etag); - if (fs_wrapper.fs_info.e_tag.len) { - struct aws_string *quoted_etag_str = - aws_string_new_from_cursor(fs_wrapper.allocator, &fs_wrapper.fs_info.e_tag); - replace_quote_entities(fs_wrapper.allocator, quoted_etag_str, &trimmed_etag); - fs_wrapper.fs_info.e_tag = aws_byte_cursor_from_buf(&trimmed_etag); - aws_string_destroy(quoted_etag_str); + int ret_val = AWS_OP_SUCCESS; + if (operation_data->on_object) { + ret_val = operation_data->on_object(&fs_wrapper.fs_info, operation_data->user_data); } - if (ret_val && operation_data->on_object) { - ret_val |= operation_data->on_object(&fs_wrapper.fs_info, operation_data->user_data); - } - - if (trimmed_etag.len) { - aws_byte_buf_clean_up(&trimmed_etag); - } + aws_byte_buf_clean_up(&trimmed_etag); return ret_val; } @@ -155,16 +147,18 @@ static bool s_on_list_bucket_result_node_encountered( if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "CommonPrefixes")) { /* this will traverse the current CommonPrefixes node, get the metadata necessary to construct * an instance of fs_info so we can invoke the callback on it. This happens once per prefix. 
*/ - bool ret_val = aws_xml_node_traverse(parser, node, s_on_common_prefixes_node, &fs_wrapper) == AWS_OP_SUCCESS; - - if (ret_val && operation_data->on_object) { - ret_val |= operation_data->on_object(&fs_wrapper.fs_info, operation_data->user_data); + if (aws_xml_node_traverse(node, s_on_common_prefixes_node, &fs_wrapper) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; } + int ret_val = AWS_OP_SUCCESS; + if (operation_data->on_object) { + ret_val = operation_data->on_object(&fs_wrapper.fs_info, operation_data->user_data); + } return ret_val; } - return true; + return AWS_OP_SUCCESS; } static int s_construct_next_request_http_message( @@ -235,14 +229,12 @@ struct aws_s3_paginator *aws_s3_initiate_list_objects( aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback); - struct aws_byte_cursor xml_result_node_name = aws_byte_cursor_from_c_str("ListBucketResult"); - struct aws_byte_cursor continuation_node_name = aws_byte_cursor_from_c_str("NextContinuationToken"); struct aws_s3_paginated_operation_params operation_params = { .next_message = s_construct_next_request_http_message, .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered, .on_paginated_operation_cleanup = s_on_paginator_cleanup, - .result_xml_node_name = &xml_result_node_name, - .continuation_token_node_name = &continuation_node_name, + .result_xml_node_name = aws_byte_cursor_from_c_str("ListBucketResult"), + .continuation_token_node_name = aws_byte_cursor_from_c_str("NextContinuationToken"), .user_data = operation_data, }; @@ -264,37 +256,3 @@ struct aws_s3_paginator *aws_s3_initiate_list_objects( return paginator; } - -struct aws_s3_paginated_operation *aws_s3_list_objects_operation_new( - struct aws_allocator *allocator, - const struct aws_s3_list_objects_params *params) { - AWS_FATAL_PRECONDITION(params); - AWS_FATAL_PRECONDITION(params->client); - AWS_FATAL_PRECONDITION(params->bucket_name.len); - AWS_FATAL_PRECONDITION(params->endpoint.len); - - struct 
aws_s3_operation_data *operation_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_operation_data)); - operation_data->allocator = allocator; - operation_data->delimiter = - params->delimiter.len > 0 ? aws_string_new_from_cursor(allocator, ¶ms->delimiter) : NULL; - operation_data->prefix = params->prefix.len > 0 ? aws_string_new_from_cursor(allocator, ¶ms->prefix) : NULL; - operation_data->on_object = params->on_object; - operation_data->user_data = params->user_data; - - aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback); - - struct aws_byte_cursor xml_result_node_name = aws_byte_cursor_from_c_str("ListBucketResult"); - struct aws_byte_cursor continuation_node_name = aws_byte_cursor_from_c_str("NextContinuationToken"); - struct aws_s3_paginated_operation_params operation_params = { - .next_message = s_construct_next_request_http_message, - .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered, - .on_paginated_operation_cleanup = s_on_paginator_cleanup, - .result_xml_node_name = &xml_result_node_name, - .continuation_token_node_name = &continuation_node_name, - .user_data = operation_data, - }; - - struct aws_s3_paginated_operation *operation = aws_s3_paginated_operation_new(allocator, &operation_params); - - return operation; -} diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c b/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c index 8856e6aa184..2ede97e10bb 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c @@ -53,117 +53,110 @@ struct result_wrapper { struct aws_s3_part_info part_info; }; -/* invoked when the ListPartResult/Parts node is iterated. */ -static bool s_on_parts_node(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) { +/* invoked as each child element of ListPartResult/Part is iterated. 
*/ +static int s_xml_on_Part_child(struct aws_xml_node *node, void *user_data) { struct result_wrapper *result_wrapper = user_data; struct aws_s3_part_info *part_info = &result_wrapper->part_info; /* for each Parts node, get the info from it and send it off as an part we've encountered */ - struct aws_byte_cursor node_name; - aws_xml_node_get_name(node, &node_name); + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ETag")) { - return aws_xml_node_as_body(parser, node, &part_info->e_tag) == AWS_OP_SUCCESS; + return aws_xml_node_as_body(node, &part_info->e_tag); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "LastModified")) { struct aws_byte_cursor date_cur; - if (aws_xml_node_as_body(parser, node, &date_cur) == AWS_OP_SUCCESS) { - aws_date_time_init_from_str_cursor(&part_info->last_modified, &date_cur, AWS_DATE_FORMAT_ISO_8601); - return true; + if (aws_xml_node_as_body(node, &date_cur) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; } - return false; + if (aws_date_time_init_from_str_cursor(&part_info->last_modified, &date_cur, AWS_DATE_FORMAT_ISO_8601)) { + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Size")) { struct aws_byte_cursor size_cur; - if (aws_xml_node_as_body(parser, node, &size_cur) == AWS_OP_SUCCESS) { - if (aws_byte_cursor_utf8_parse_u64(size_cur, &part_info->size)) { - return false; - } - return true; + if (aws_xml_node_as_body(node, &size_cur) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; + } + if (aws_byte_cursor_utf8_parse_u64(size_cur, &part_info->size) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; } + return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "PartNumber")) { struct aws_byte_cursor part_number_cur; - if (aws_xml_node_as_body(parser, node, &part_number_cur) == AWS_OP_SUCCESS) { - uint64_t part_number = 0; - if (aws_byte_cursor_utf8_parse_u64(part_number_cur, &part_number)) { - return 
false; - } - if (part_number > UINT32_MAX) { - aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); - return false; - } - part_info->part_number = (uint32_t)part_number; - return true; + if (aws_xml_node_as_body(node, &part_number_cur) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; + } + + uint64_t part_number = 0; + if (aws_byte_cursor_utf8_parse_u64(part_number_cur, &part_number) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; + } + if (part_number > UINT32_MAX) { + return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } + part_info->part_number = (uint32_t)part_number; + return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumCRC32")) { - return aws_xml_node_as_body(parser, node, &part_info->checksumCRC32) == AWS_OP_SUCCESS; + return aws_xml_node_as_body(node, &part_info->checksumCRC32); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumCRC32C")) { - return aws_xml_node_as_body(parser, node, &part_info->checksumCRC32C) == AWS_OP_SUCCESS; + return aws_xml_node_as_body(node, &part_info->checksumCRC32C); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumSHA1")) { - return aws_xml_node_as_body(parser, node, &part_info->checksumSHA1) == AWS_OP_SUCCESS; + return aws_xml_node_as_body(node, &part_info->checksumSHA1); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumSHA256")) { - return aws_xml_node_as_body(parser, node, &part_info->checksumSHA256) == AWS_OP_SUCCESS; + return aws_xml_node_as_body(node, &part_info->checksumSHA256); } - return true; + return AWS_OP_SUCCESS; } -static bool s_on_list_bucket_result_node_encountered( - struct aws_xml_parser *parser, - struct aws_xml_node *node, - void *user_data) { +static int s_xml_on_ListPartsResult_child(struct aws_xml_node *node, void *user_data) { struct aws_s3_operation_data *operation_data = user_data; - struct aws_byte_cursor node_name; - aws_xml_node_get_name(node, &node_name); - - struct result_wrapper result_wrapper; - AWS_ZERO_STRUCT(result_wrapper); - + struct 
aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Part")) { - result_wrapper.allocator = operation_data->allocator; + struct result_wrapper result_wrapper = { + .allocator = operation_data->allocator, + }; + /* this will traverse the current Parts node, get the metadata necessary to construct - * an instance of fs_info so we can invoke the callback on it. This happens once per part. */ - bool ret_val = aws_xml_node_traverse(parser, node, s_on_parts_node, &result_wrapper) == AWS_OP_SUCCESS; - - struct aws_byte_buf trimmed_etag; - AWS_ZERO_STRUCT(trimmed_etag); - - if (result_wrapper.part_info.e_tag.len) { - struct aws_string *quoted_etag_str = - aws_string_new_from_cursor(result_wrapper.allocator, &result_wrapper.part_info.e_tag); - replace_quote_entities(result_wrapper.allocator, quoted_etag_str, &trimmed_etag); - result_wrapper.part_info.e_tag = aws_byte_cursor_from_buf(&trimmed_etag); - aws_string_destroy(quoted_etag_str); + * an instance of part_info so we can invoke the callback on it. This happens once per part. 
*/ + if (aws_xml_node_traverse(node, s_xml_on_Part_child, &result_wrapper) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; } - if (ret_val && operation_data->on_part) { - ret_val |= operation_data->on_part(&result_wrapper.part_info, operation_data->user_data); - } + struct aws_byte_buf trimmed_etag = + aws_replace_quote_entities(result_wrapper.allocator, result_wrapper.part_info.e_tag); + result_wrapper.part_info.e_tag = aws_byte_cursor_from_buf(&trimmed_etag); - if (trimmed_etag.len) { - aws_byte_buf_clean_up(&trimmed_etag); + int ret_val = AWS_OP_SUCCESS; + if (operation_data->on_part) { + ret_val = operation_data->on_part(&result_wrapper.part_info, operation_data->user_data); } + aws_byte_buf_clean_up(&trimmed_etag); + return ret_val; } - return true; + return AWS_OP_SUCCESS; } static int s_construct_next_request_http_message( @@ -210,56 +203,6 @@ static int s_construct_next_request_http_message( return AWS_OP_SUCCESS; } -struct aws_s3_paginator *aws_s3_initiate_list_parts( - struct aws_allocator *allocator, - const struct aws_s3_list_parts_params *params) { - AWS_FATAL_PRECONDITION(params); - AWS_FATAL_PRECONDITION(params->client); - AWS_FATAL_PRECONDITION(params->bucket_name.len); - AWS_FATAL_PRECONDITION(params->key.len); - AWS_FATAL_PRECONDITION(params->upload_id.len); - AWS_FATAL_PRECONDITION(params->endpoint.len); - - struct aws_s3_operation_data *operation_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_operation_data)); - operation_data->allocator = allocator; - operation_data->key = aws_string_new_from_cursor(allocator, ¶ms->key); - operation_data->upload_id = aws_string_new_from_cursor(allocator, ¶ms->upload_id); - operation_data->on_part = params->on_part; - operation_data->user_data = params->user_data; - - aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback); - - struct aws_byte_cursor xml_result_node_name = aws_byte_cursor_from_c_str("ListPartsResult"); - const struct aws_byte_cursor continuation_node_name = 
aws_byte_cursor_from_c_str("NextPartNumberMarker"); - - struct aws_s3_paginated_operation_params operation_params = { - .next_message = s_construct_next_request_http_message, - .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered, - .on_paginated_operation_cleanup = s_on_paginator_cleanup, - .result_xml_node_name = &xml_result_node_name, - .continuation_token_node_name = &continuation_node_name, - .user_data = operation_data, - }; - - struct aws_s3_paginated_operation *operation = aws_s3_paginated_operation_new(allocator, &operation_params); - - struct aws_s3_paginator_params paginator_params = { - .client = params->client, - .bucket_name = params->bucket_name, - .endpoint = params->endpoint, - .operation = operation, - .on_page_finished_fn = params->on_list_finished, - .user_data = params->user_data, - }; - - struct aws_s3_paginator *paginator = aws_s3_initiate_paginator(allocator, &paginator_params); - - // transfer control to paginator - aws_s3_paginated_operation_release(operation); - - return paginator; -} - struct aws_s3_paginated_operation *aws_s3_list_parts_operation_new( struct aws_allocator *allocator, const struct aws_s3_list_parts_params *params) { @@ -276,15 +219,12 @@ struct aws_s3_paginated_operation *aws_s3_list_parts_operation_new( aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback); - struct aws_byte_cursor xml_result_node_name = aws_byte_cursor_from_c_str("ListPartsResult"); - const struct aws_byte_cursor continuation_node_name = aws_byte_cursor_from_c_str("NextPartNumberMarker"); - struct aws_s3_paginated_operation_params operation_params = { .next_message = s_construct_next_request_http_message, - .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered, + .on_result_node_encountered_fn = s_xml_on_ListPartsResult_child, .on_paginated_operation_cleanup = s_on_paginator_cleanup, - .result_xml_node_name = &xml_result_node_name, - .continuation_token_node_name = 
&continuation_node_name, + .result_xml_node_name = aws_byte_cursor_from_c_str("ListPartsResult"), + .continuation_token_node_name = aws_byte_cursor_from_c_str("NextPartNumberMarker"), .user_data = operation_data, }; diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c b/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c index ff521dc9fbf..d33488371f3 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c @@ -4,25 +4,32 @@ */ #include "aws/s3/private/s3_auto_ranged_get.h" +#include "aws/s3/private/s3_auto_ranged_put.h" #include "aws/s3/private/s3_checksums.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" +#include "aws/s3/private/s3_parallel_input_stream.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" +#include "aws/s3/s3express_credentials_provider.h" #include <aws/auth/signable.h> #include <aws/auth/signing.h> #include <aws/auth/signing_config.h> #include <aws/auth/signing_result.h> +#include <aws/common/clock.h> #include <aws/common/encoding.h> #include <aws/common/string.h> #include <aws/common/system_info.h> +#include <aws/io/async_stream.h> #include <aws/io/event_loop.h> #include <aws/io/retry_strategy.h> +#include <aws/io/socket.h> #include <aws/io/stream.h> #include <inttypes.h> static const size_t s_dynamic_body_initial_buf_size = KB_TO_BYTES(1); static const size_t s_default_body_streaming_priority_queue_size = 16; +static const size_t s_default_event_delivery_array_size = 16; static int s_s3_request_priority_queue_pred(const void *a, const void *b); static void s_s3_meta_request_destroy(void *user_data); @@ -54,6 +61,16 @@ static int s_s3_meta_request_incoming_headers( size_t headers_count, void *user_data); +static int s_s3_meta_request_headers_block_done( + struct aws_http_stream *stream, + enum aws_http_header_block header_block, + void *user_data); + +static void 
s_s3_meta_request_stream_metrics( + struct aws_http_stream *stream, + const struct aws_http_stream_metrics *metrics, + void *user_data); + static void s_s3_meta_request_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data); static void s_s3_meta_request_send_request_finish( @@ -61,6 +78,11 @@ static void s_s3_meta_request_send_request_finish( struct aws_http_stream *stream, int error_code); +static int s_s3_meta_request_read_from_pending_async_writes( + struct aws_s3_meta_request *meta_request, + struct aws_byte_buf *dest, + bool *eof); + void aws_s3_meta_request_lock_synced_data(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); @@ -84,7 +106,8 @@ static int s_meta_request_get_response_headers_checksum_callback( continue; } const struct aws_byte_cursor *algorithm_header_name = aws_get_http_header_name_from_algorithm(i); - if (aws_http_headers_has(headers, *algorithm_header_name)) { + if (aws_http_headers_has(headers, *algorithm_header_name) && + !aws_http_headers_has(headers, g_mp_parts_count_header_name)) { struct aws_byte_cursor header_sum; aws_http_headers_get(headers, *algorithm_header_name, &header_sum); size_t encoded_len = 0; @@ -92,8 +115,8 @@ static int s_meta_request_get_response_headers_checksum_callback( if (header_sum.len == encoded_len - 1) { /* encoded_len includes the nullptr length. -1 is the expected length. 
*/ aws_byte_buf_init_copy_from_cursor( - &meta_request->meta_request_level_response_header_checksum, aws_default_allocator(), header_sum); - meta_request->meta_request_level_running_response_sum = aws_checksum_new(aws_default_allocator(), i); + &meta_request->meta_request_level_response_header_checksum, meta_request->allocator, header_sum); + meta_request->meta_request_level_running_response_sum = aws_checksum_new(meta_request->allocator, i); } break; } @@ -105,7 +128,7 @@ static int s_meta_request_get_response_headers_checksum_callback( } } -/* warning this might get screwed up with retrys/restarts */ +/* warning this might get screwed up with retries/restarts */ static int s_meta_request_get_response_body_checksum_callback( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, @@ -141,10 +164,10 @@ static void s_meta_request_get_response_finish_checksum_callback( /* what error should I raise for these? */ aws_base64_compute_encoded_len( meta_request->meta_request_level_running_response_sum->digest_size, &encoded_checksum_len); - aws_byte_buf_init(&encoded_response_body_sum, aws_default_allocator(), encoded_checksum_len); + aws_byte_buf_init(&encoded_response_body_sum, meta_request->allocator, encoded_checksum_len); aws_byte_buf_init( &response_body_sum, - aws_default_allocator(), + meta_request->allocator, meta_request->meta_request_level_running_response_sum->digest_size); aws_checksum_finalize(meta_request->meta_request_level_running_response_sum, &response_body_sum, 0); struct aws_byte_cursor response_body_sum_cursor = aws_byte_cursor_from_buf(&response_body_sum); @@ -192,6 +215,7 @@ int aws_s3_meta_request_init_base( meta_request->type = options->type; /* Set up reference count. 
*/ aws_ref_count_init(&meta_request->ref_count, meta_request, s_s3_meta_request_destroy); + aws_linked_list_init(&meta_request->synced_data.cancellable_http_streams_list); if (part_size == SIZE_MAX) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); @@ -216,40 +240,62 @@ int aws_s3_meta_request_init_base( goto error; } + aws_array_list_init_dynamic( + &meta_request->synced_data.event_delivery_array, + meta_request->allocator, + s_default_event_delivery_array_size, + sizeof(struct aws_s3_meta_request_event)); + + aws_array_list_init_dynamic( + &meta_request->io_threaded_data.event_delivery_array, + meta_request->allocator, + s_default_event_delivery_array_size, + sizeof(struct aws_s3_meta_request_event)); + *((size_t *)&meta_request->part_size) = part_size; *((bool *)&meta_request->should_compute_content_md5) = should_compute_content_md5; checksum_config_init(&meta_request->checksum_config, options->checksum_config); - if (options->signing_config) { - meta_request->cached_signing_config = aws_cached_signing_config_new(allocator, options->signing_config); - } - /* Set initial_meta_request */ - if (options->send_filepath.len > 0) { - /* Create copy of original message, but with body-stream that reads directly from file */ - meta_request->initial_request_message = aws_s3_message_util_copy_http_message_filepath_body_all_headers( - allocator, options->message, options->send_filepath); - if (meta_request->initial_request_message == NULL) { - goto error; - } - } else { - /* Keep a reference to the original message structure passed in. */ - meta_request->initial_request_message = aws_http_message_acquire(options->message); + if (options->signing_config) { + meta_request->cached_signing_config = aws_cached_signing_config_new(client, options->signing_config); } /* Client is currently optional to allow spinning up a meta_request without a client in a test. 
*/ if (client != NULL) { - aws_s3_client_acquire(client); - meta_request->client = client; + meta_request->client = aws_s3_client_acquire(client); meta_request->io_event_loop = aws_event_loop_group_get_next_loop(client->body_streaming_elg); meta_request->synced_data.read_window_running_total = client->initial_read_window; } + /* Keep original message around, for headers, method, and synchronous body-stream (if any) */ + meta_request->initial_request_message = aws_http_message_acquire(options->message); + + /* If the request's body is being passed in some other way, set that up. + * (we checked earlier that the request body is not being passed multiple ways) */ + if (options->send_filepath.len > 0) { + /* Create parallel read stream from file */ + meta_request->request_body_parallel_stream = + client->vtable->parallel_input_stream_new_from_file(allocator, options->send_filepath); + if (meta_request->request_body_parallel_stream == NULL) { + goto error; + } + + } else if (options->send_async_stream != NULL) { + meta_request->request_body_async_stream = aws_async_input_stream_acquire(options->send_async_stream); + + } else if (options->send_using_async_writes == true) { + meta_request->request_body_using_async_writes = true; + aws_byte_buf_init(&meta_request->synced_data.async_write.buffered_data, allocator, 0); + } + meta_request->synced_data.next_streaming_part = 1; meta_request->meta_request_level_running_response_sum = NULL; meta_request->user_data = options->user_data; meta_request->shutdown_callback = options->shutdown_callback; meta_request->progress_callback = options->progress_callback; + meta_request->telemetry_callback = options->telemetry_callback; + meta_request->upload_review_callback = options->upload_review_callback; if (meta_request->checksum_config.validate_response_checksum) { /* TODO: the validate for auto range get should happen for each response received. 
*/ @@ -308,11 +354,28 @@ void aws_s3_meta_request_increment_read_window(struct aws_s3_meta_request *meta_ } void aws_s3_meta_request_cancel(struct aws_s3_meta_request *meta_request) { + struct aws_future_void *write_future_to_cancel = NULL; + /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); aws_s3_meta_request_set_fail_synced(meta_request, NULL, AWS_ERROR_S3_CANCELED); + aws_s3_meta_request_cancel_cancellable_requests_synced(meta_request, AWS_ERROR_S3_CANCELED); + if (meta_request->synced_data.async_write.future != NULL) { + write_future_to_cancel = meta_request->synced_data.async_write.future; + meta_request->synced_data.async_write.future = NULL; + } aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ + + if (write_future_to_cancel != NULL) { + AWS_LOGF_TRACE( + AWS_LS_S3_META_REQUEST, "id=%p: write future complete due to cancellation", (void *)meta_request); + aws_future_void_set_error(write_future_to_cancel, AWS_ERROR_S3_REQUEST_HAS_COMPLETED); + aws_future_void_release(write_future_to_cancel); + } + + /* Schedule the work task, to continue processing the meta-request */ + aws_s3_client_schedule_process_work(meta_request->client); } int aws_s3_meta_request_pause( @@ -337,6 +400,16 @@ void aws_s3_meta_request_set_fail_synced( AWS_PRECONDITION(meta_request); ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); + /* Protect against bugs */ + if (error_code == AWS_ERROR_SUCCESS) { + AWS_ASSERT(false); + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Meta request failed but error code not set, AWS_ERROR_UNKNOWN will be reported", + (void *)meta_request); + error_code = AWS_ERROR_UNKNOWN; + } + if (meta_request->synced_data.finish_result_set) { return; } @@ -414,49 +487,60 @@ struct aws_s3_meta_request *aws_s3_meta_request_release(struct aws_s3_meta_reque static void s_s3_meta_request_destroy(void *user_data) { struct aws_s3_meta_request *meta_request = user_data; AWS_PRECONDITION(meta_request); + void *log_id = 
meta_request; AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Cleaning up meta request", (void *)meta_request); /* Clean up our initial http message */ - if (meta_request->initial_request_message != NULL) { - aws_http_message_release(meta_request->initial_request_message); - meta_request->initial_request_message = NULL; - } + meta_request->request_body_async_stream = aws_async_input_stream_release(meta_request->request_body_async_stream); + meta_request->initial_request_message = aws_http_message_release(meta_request->initial_request_message); void *meta_request_user_data = meta_request->user_data; aws_s3_meta_request_shutdown_fn *shutdown_callback = meta_request->shutdown_callback; aws_cached_signing_config_destroy(meta_request->cached_signing_config); + aws_string_destroy(meta_request->s3express_session_host); aws_mutex_clean_up(&meta_request->synced_data.lock); /* endpoint should have already been released and set NULL by the meta request finish call. * But call release() again, just in case we're tearing down a half-initialized meta request */ aws_s3_endpoint_release(meta_request->endpoint); meta_request->client = aws_s3_client_release(meta_request->client); + AWS_ASSERT(aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests) == 0); aws_priority_queue_clean_up(&meta_request->synced_data.pending_body_streaming_requests); + + AWS_ASSERT(aws_array_list_length(&meta_request->synced_data.event_delivery_array) == 0); + aws_array_list_clean_up(&meta_request->synced_data.event_delivery_array); + + AWS_ASSERT(aws_array_list_length(&meta_request->io_threaded_data.event_delivery_array) == 0); + aws_array_list_clean_up(&meta_request->io_threaded_data.event_delivery_array); + + AWS_ASSERT(aws_linked_list_empty(&meta_request->synced_data.cancellable_http_streams_list)); + aws_s3_meta_request_result_clean_up(meta_request, &meta_request->synced_data.finish_result); + aws_byte_buf_clean_up(&meta_request->synced_data.async_write.buffered_data); + if 
(meta_request->vtable != NULL) { - AWS_LOGF_TRACE( - AWS_LS_S3_META_REQUEST, "id=%p Calling virtual meta request destroy function.", (void *)meta_request); + AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Calling virtual meta request destroy function.", log_id); meta_request->vtable->destroy(meta_request); } meta_request = NULL; if (shutdown_callback != NULL) { - AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Calling meta request shutdown callback.", (void *)meta_request); + AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Calling meta request shutdown callback.", log_id); shutdown_callback(meta_request_user_data); } - AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Meta request clean up finished.", (void *)meta_request); + AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Meta request clean up finished.", log_id); } static int s_s3_request_priority_queue_pred(const void *a, const void *b) { - const struct aws_s3_request **request_a = (const struct aws_s3_request **)a; + const struct aws_s3_request *const *request_a = a; AWS_PRECONDITION(request_a); AWS_PRECONDITION(*request_a); - const struct aws_s3_request **request_b = (const struct aws_s3_request **)b; + const struct aws_s3_request *const *request_b = b; AWS_PRECONDITION(request_b); AWS_PRECONDITION(*request_b); @@ -499,7 +583,10 @@ bool aws_s3_meta_request_is_finished(struct aws_s3_meta_request *meta_request) { } static void s_s3_meta_request_prepare_request_task(struct aws_task *task, void *arg, enum aws_task_status task_status); +static void s_s3_meta_request_on_request_prepared(void *user_data); +/* TODO: document how this is final step in prepare-request sequence. + * Could be invoked on any thread. 
*/ static void s_s3_prepare_request_payload_callback_and_destroy( struct aws_s3_prepare_request_payload *payload, int error_code) { @@ -509,18 +596,30 @@ static void s_s3_prepare_request_payload_callback_and_destroy( struct aws_s3_meta_request *meta_request = payload->request->meta_request; AWS_PRECONDITION(meta_request); - AWS_PRECONDITION(meta_request->client); - struct aws_s3_client *client = aws_s3_client_acquire(meta_request->client); + ++payload->request->num_times_prepared; - struct aws_allocator *allocator = client->allocator; - AWS_PRECONDITION(allocator); + if (error_code) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Could not prepare request %p due to error %d (%s).", + (void *)meta_request, + (void *)payload->request, + error_code, + aws_error_str(error_code)); + + /* BEGIN CRITICAL SECTION */ + aws_s3_meta_request_lock_synced_data(meta_request); + aws_s3_meta_request_set_fail_synced(meta_request, payload->request, error_code); + aws_s3_meta_request_unlock_synced_data(meta_request); + /* END CRITICAL SECTION */ + } if (payload->callback != NULL) { payload->callback(meta_request, payload->request, error_code, payload->user_data); } - aws_mem_release(allocator, payload); - aws_s3_client_release(client); + aws_future_void_release(payload->asyncstep_prepare_request); + aws_mem_release(payload->allocator, payload); } static void s_s3_meta_request_schedule_prepare_request_default( @@ -561,13 +660,21 @@ static void s_s3_meta_request_schedule_prepare_request_default( struct aws_s3_prepare_request_payload *payload = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_prepare_request_payload)); + payload->allocator = allocator; payload->request = request; payload->callback = callback; payload->user_data = user_data; aws_task_init( &payload->task, s_s3_meta_request_prepare_request_task, payload, "s3_meta_request_prepare_request_task"); - aws_event_loop_schedule_task_now(meta_request->io_event_loop, &payload->task); + if 
(meta_request->request_body_parallel_stream) { + /* The body stream supports reading in parallel, so schedule task on any I/O thread. + * If we always used the meta-request's dedicated io_event_loop, we wouldn't get any parallelism. */ + struct aws_event_loop *loop = aws_event_loop_group_get_next_loop(client->body_streaming_elg); + aws_event_loop_schedule_task_now(loop, &payload->task); + } else { + aws_event_loop_schedule_task_now(meta_request->io_event_loop, &payload->task); + } } static void s_s3_meta_request_prepare_request_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { @@ -583,54 +690,41 @@ static void s_s3_meta_request_prepare_request_task(struct aws_task *task, void * struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); - struct aws_s3_meta_request_vtable *vtable = meta_request->vtable; + const struct aws_s3_meta_request_vtable *vtable = meta_request->vtable; AWS_PRECONDITION(vtable); /* Client owns this event loop group. A cancel should not be possible. */ AWS_ASSERT(task_status == AWS_TASK_STATUS_RUN_READY); - int error_code = AWS_ERROR_SUCCESS; - if (!request->always_send && aws_s3_meta_request_has_finish_result(meta_request)) { - aws_raise_error(AWS_ERROR_S3_CANCELED); - goto dont_send_clean_up; - } - - if (vtable->prepare_request(meta_request, request)) { - ++request->num_times_prepared; - goto dont_send_clean_up; + s_s3_prepare_request_payload_callback_and_destroy(payload, AWS_ERROR_S3_CANCELED); + return; } - ++request->num_times_prepared; - - aws_s3_add_user_agent_header(meta_request->allocator, request->send_data.message); - - /* Sign the newly created message. */ - s_s3_meta_request_sign_request(meta_request, request, s_s3_meta_request_request_on_signed, payload); - + /* Kick off the async vtable->prepare_request() + * Each subclass has its own implementation of this. 
*/ + payload->asyncstep_prepare_request = vtable->prepare_request(request); + aws_future_void_register_callback( + payload->asyncstep_prepare_request, s_s3_meta_request_on_request_prepared, payload); return; +} -dont_send_clean_up: - - error_code = aws_last_error_or_unknown(); - - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, - "id=%p Could not prepare request %p due to error %d (%s).", - (void *)meta_request, - (void *)request, - error_code, - aws_error_str(error_code)); +/* Called after vtable->prepare_request has succeeded or failed. */ +static void s_s3_meta_request_on_request_prepared(void *user_data) { + struct aws_s3_prepare_request_payload *payload = user_data; + struct aws_s3_request *request = payload->request; + struct aws_s3_meta_request *meta_request = request->meta_request; - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); - aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); - aws_s3_meta_request_unlock_synced_data(meta_request); + int error_code = aws_future_void_get_error(payload->asyncstep_prepare_request); + if (error_code) { + s_s3_prepare_request_payload_callback_and_destroy(payload, error_code); + return; } - /* END CRITICAL SECTION */ - s_s3_prepare_request_payload_callback_and_destroy(payload, error_code); + aws_s3_add_user_agent_header(meta_request->allocator, request->send_data.message); + + /* Next step is to sign the newly created message (completion callback could happen on any thread) */ + s_s3_meta_request_sign_request(meta_request, request, s_s3_meta_request_request_on_signed, payload); } static void s_s3_meta_request_init_signing_date_time( @@ -662,9 +756,147 @@ static void s_s3_meta_request_sign_request( AWS_PRECONDITION(meta_request->vtable); AWS_PRECONDITION(meta_request->vtable->sign_request); + if (request->send_data.metrics) { + struct aws_s3_request_metrics *metric = request->send_data.metrics; + aws_high_res_clock_get_ticks((uint64_t 
*)&metric->time_metrics.sign_start_timestamp_ns); + } + meta_request->vtable->sign_request(meta_request, request, on_signing_complete, user_data); } +struct aws_get_s3express_credentials_user_data { + /* Keep our own reference to allocator, because the meta request can be gone after the callback invoked. */ + struct aws_allocator *allocator; + + struct aws_s3_meta_request *meta_request; + struct aws_s3_request *request; + aws_signing_complete_fn *on_signing_complete; + + const struct aws_credentials *original_credentials; + + struct aws_signing_config_aws base_signing_config; + struct aws_credentials_properties_s3express properties; + void *user_data; +}; + +static void s_aws_get_s3express_credentials_user_data_destroy(struct aws_get_s3express_credentials_user_data *context) { + aws_s3_meta_request_release(context->meta_request); + aws_credentials_release(context->original_credentials); + aws_mem_release(context->allocator, context); +} + +static void s_get_s3express_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { + struct aws_get_s3express_credentials_user_data *context = user_data; + struct aws_signing_config_aws signing_config = context->base_signing_config; + + if (error_code) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Failed to get S3 Express credentials %p. 
due to error code %d (%s)", + (void *)context->meta_request, + (void *)context->request, + error_code, + aws_error_str(error_code)); + context->on_signing_complete(NULL, error_code, context->user_data); + goto done; + } + s_s3_meta_request_init_signing_date_time(context->meta_request, &signing_config.date); + /* Override the credentials */ + signing_config.credentials = credentials; + signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS; + if (aws_sign_request_aws( + context->allocator, + context->request->send_data.signable, + (struct aws_signing_config_base *)&signing_config, + context->on_signing_complete, + context->user_data)) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Could not sign request %p. due to error code %d (%s)", + (void *)context->meta_request, + (void *)context->request, + aws_last_error_or_unknown(), + aws_error_str(aws_last_error_or_unknown())); + context->on_signing_complete(NULL, aws_last_error_or_unknown(), context->user_data); + } +done: + s_aws_get_s3express_credentials_user_data_destroy(context); +} + +static void s_get_original_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { + struct aws_get_s3express_credentials_user_data *context = user_data; + struct aws_s3_meta_request *meta_request = context->meta_request; + if (error_code) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Failed to get S3 Express credentials %p. due to error code %d (%s)", + (void *)context->meta_request, + (void *)context->request, + error_code, + aws_error_str(error_code)); + context->on_signing_complete(NULL, error_code, context->user_data); + s_aws_get_s3express_credentials_user_data_destroy(context); + return; + } + context->original_credentials = credentials; + aws_credentials_acquire(context->original_credentials); + + /** + * Derive the credentials for S3 Express. 
+ */ + struct aws_s3_client *client = meta_request->client; + if (aws_s3express_credentials_provider_get_credentials( + client->s3express_provider, + context->original_credentials, + &context->properties, + s_get_s3express_credentials_callback, + context)) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Could not get S3 Express credentials %p", + (void *)meta_request, + (void *)context->request); + context->on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); + s_aws_get_s3express_credentials_user_data_destroy(context); + } +} + +static int s_meta_request_resolve_signing_config( + struct aws_signing_config_aws *out_signing_config, + struct aws_s3_request *request, + struct aws_s3_meta_request *meta_request) { + struct aws_s3_client *client = meta_request->client; + if (meta_request->cached_signing_config != NULL) { + *out_signing_config = meta_request->cached_signing_config->config; + + if (out_signing_config->credentials == NULL && out_signing_config->credentials_provider == NULL) { + /* When no credentials available from meta request level override, we use the credentials from client */ + out_signing_config->credentials = client->cached_signing_config->config.credentials; + out_signing_config->credentials_provider = client->cached_signing_config->config.credentials_provider; + } + } else if (client->cached_signing_config != NULL) { + *out_signing_config = client->cached_signing_config->config; + } else { + /* Not possible to have no cached signing config from both client and request */ + AWS_FATAL_ASSERT(false); + } + + /* If the checksum is configured to be added to the trailer, the payload will be aws-chunked encoded. The payload + * will need to be streaming signed/unsigned. 
*/ + if (meta_request->checksum_config.location == AWS_SCL_TRAILER && + aws_byte_cursor_eq(&out_signing_config->signed_body_value, &g_aws_signed_body_value_unsigned_payload)) { + out_signing_config->signed_body_value = g_aws_signed_body_value_streaming_unsigned_payload_trailer; + } + /* However the initial request for a multipart upload does not have a trailing checksum and is not chunked so it + * must have an unsigned_payload signed_body value*/ + if (request->part_number == 0 && + aws_byte_cursor_eq( + &out_signing_config->signed_body_value, &g_aws_signed_body_value_streaming_unsigned_payload_trailer)) { + out_signing_config->signed_body_value = g_aws_signed_body_value_unsigned_payload; + } + return AWS_OP_SUCCESS; +} + /* Handles signing a message for the caller. */ void aws_s3_meta_request_sign_request_default( struct aws_s3_meta_request *meta_request, @@ -680,11 +912,7 @@ void aws_s3_meta_request_sign_request_default( struct aws_signing_config_aws signing_config; - if (meta_request->cached_signing_config != NULL) { - signing_config = meta_request->cached_signing_config->config; - } else if (client->cached_signing_config != NULL) { - signing_config = client->cached_signing_config->config; - } else { + if (s_meta_request_resolve_signing_config(&signing_config, request, meta_request)) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: No signing config present. Not signing request %p.", @@ -695,8 +923,6 @@ void aws_s3_meta_request_sign_request_default( return; } - s_s3_meta_request_init_signing_date_time(meta_request, &signing_config.date); - request->send_data.signable = aws_signable_new_http_request(meta_request->allocator, request->send_data.message); AWS_LOGF_TRACE( @@ -718,36 +944,75 @@ void aws_s3_meta_request_sign_request_default( return; } - /* If the checksum is configured to be added to the trailer, the payload will be aws-chunked encoded. The payload - * will need to be streaming signed/unsigned. 
*/ - if (meta_request->checksum_config.location == AWS_SCL_TRAILER && - aws_byte_cursor_eq(&signing_config.signed_body_value, &g_aws_signed_body_value_unsigned_payload)) { - signing_config.signed_body_value = g_aws_signed_body_value_streaming_unsigned_payload_trailer; - } - /* However the initial request for a multipart upload does not have a trailing checksum and is not chunked so it - * must have an unsigned_payload signed_body value*/ - if (request->part_number == 0 && - aws_byte_cursor_eq( - &signing_config.signed_body_value, &g_aws_signed_body_value_streaming_unsigned_payload_trailer)) { - signing_config.signed_body_value = g_aws_signed_body_value_unsigned_payload; - } - - if (aws_sign_request_aws( - meta_request->allocator, - request->send_data.signable, - (struct aws_signing_config_base *)&signing_config, - on_signing_complete, - user_data)) { + if (signing_config.algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS) { + /* Fetch credentials from S3 Express provider. */ + struct aws_get_s3express_credentials_user_data *context = + aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_get_s3express_credentials_user_data)); + + context->allocator = meta_request->allocator; + context->base_signing_config = signing_config; + context->meta_request = aws_s3_meta_request_acquire(meta_request); + context->on_signing_complete = on_signing_complete; + context->request = request; + context->user_data = user_data; + context->properties.host = aws_byte_cursor_from_string(meta_request->s3express_session_host); + context->properties.region = signing_config.region; + + if (signing_config.credentials) { + context->original_credentials = signing_config.credentials; + aws_credentials_acquire(context->original_credentials); + /** + * Derive the credentials for S3 Express. 
+ */ + if (aws_s3express_credentials_provider_get_credentials( + client->s3express_provider, + context->original_credentials, + &context->properties, + s_get_s3express_credentials_callback, + context)) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Could not get S3 Express credentials %p", + (void *)meta_request, + (void *)request); + on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); + s_aws_get_s3express_credentials_user_data_destroy(context); + return; + } + } else if (signing_config.credentials_provider) { + /* Get the credentials from provider first. */ + if (aws_credentials_provider_get_credentials( + signing_config.credentials_provider, s_get_original_credentials_callback, context)) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Could not get S3 Express credentials %p", + (void *)meta_request, + (void *)request); + on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); + s_aws_get_s3express_credentials_user_data_destroy(context); + return; + } + } + } else { + /* Regular signing. */ + s_s3_meta_request_init_signing_date_time(meta_request, &signing_config.date); + if (aws_sign_request_aws( + meta_request->allocator, + request->send_data.signable, + (struct aws_signing_config_base *)&signing_config, + on_signing_complete, + user_data)) { - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, "id=%p: Could not sign request %p", (void *)meta_request, (void *)request); + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, "id=%p: Could not sign request %p", (void *)meta_request, (void *)request); - on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); - return; + on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); + return; + } } } -/* Handle the signing result, getting an HTTP connection for the request if signing succeeded. 
*/ +/* Handle the signing result */ static void s_s3_meta_request_request_on_signed( struct aws_signing_result *signing_result, int error_code, @@ -774,24 +1039,24 @@ static void s_s3_meta_request_request_on_signed( goto finish; } + if (request->send_data.metrics) { + struct aws_s3_request_metrics *metric = request->send_data.metrics; + aws_high_res_clock_get_ticks((uint64_t *)&metric->time_metrics.sign_end_timestamp_ns); + AWS_ASSERT(metric->time_metrics.sign_start_timestamp_ns != 0); + metric->time_metrics.signing_duration_ns = + metric->time_metrics.sign_end_timestamp_ns - metric->time_metrics.sign_start_timestamp_ns; + } + finish: if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, - "id=%p Meta request could not sign TTP request due to error code %d (%s)", + "id=%p Meta request could not sign HTTP request due to error code %d (%s)", (void *)meta_request, error_code, aws_error_str(error_code)); - - /* BEGIN CRITICAL SECTION */ - { - aws_s3_meta_request_lock_synced_data(meta_request); - aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); - aws_s3_meta_request_unlock_synced_data(meta_request); - } - /* END CRITICAL SECTION */ } s_s3_prepare_request_payload_callback_and_destroy(payload, error_code); @@ -813,9 +1078,16 @@ void aws_s3_meta_request_send_request(struct aws_s3_meta_request *meta_request, options.request = request->send_data.message; options.user_data = connection; options.on_response_headers = s_s3_meta_request_incoming_headers; - options.on_response_header_block_done = NULL; + options.on_response_header_block_done = s_s3_meta_request_headers_block_done; options.on_response_body = s_s3_meta_request_incoming_body; + if (request->send_data.metrics) { + options.on_metrics = s_s3_meta_request_stream_metrics; + } options.on_complete = s_s3_meta_request_stream_complete; + if (request->request_type == AWS_S3_REQUEST_TYPE_UPLOAD_PART) { + options.response_first_byte_timeout_ms = 
aws_atomic_load_int(&meta_request->client->upload_timeout_ms); + request->upload_timeout_ms = (size_t)options.response_first_byte_timeout_ms; + } struct aws_http_stream *stream = aws_http_connection_make_request(connection->http_connection, &options); @@ -828,19 +1100,51 @@ void aws_s3_meta_request_send_request(struct aws_s3_meta_request *meta_request, AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p: Sending request %p", (void *)meta_request, (void *)request); - if (aws_http_stream_activate(stream) != AWS_OP_SUCCESS) { - aws_http_stream_release(stream); - stream = NULL; + if (!request->always_send) { + /* BEGIN CRITICAL SECTION */ + aws_s3_meta_request_lock_synced_data(meta_request); + if (aws_s3_meta_request_has_finish_result_synced(meta_request)) { + /* The meta request has finish result already, for this request, treat it as canceled. */ + aws_raise_error(AWS_ERROR_S3_CANCELED); + aws_s3_meta_request_unlock_synced_data(meta_request); + goto error_finish; + } - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, "id=%p: Could not activate HTTP stream %p", (void *)meta_request, (void *)request); + /* Activate the stream within the lock as once the activate invoked, the HTTP level callback can happen right + * after. */ + if (aws_http_stream_activate(stream) != AWS_OP_SUCCESS) { + aws_s3_meta_request_unlock_synced_data(meta_request); + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Could not activate HTTP stream %p", + (void *)meta_request, + (void *)request); + goto error_finish; + } + aws_linked_list_push_back( + &meta_request->synced_data.cancellable_http_streams_list, &request->cancellable_http_streams_list_node); + request->synced_data.cancellable_http_stream = stream; - goto error_finish; + aws_s3_meta_request_unlock_synced_data(meta_request); + /* END CRITICAL SECTION */ + } else { + /* If the request always send, it is not cancellable. We simply activate the stream. 
*/ + if (aws_http_stream_activate(stream) != AWS_OP_SUCCESS) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Could not activate HTTP stream %p", + (void *)meta_request, + (void *)request); + goto error_finish; + } } - return; error_finish: + if (stream) { + aws_http_stream_release(stream); + stream = NULL; + } s_s3_meta_request_send_request_finish(connection, NULL, aws_last_error_or_unknown()); } @@ -849,15 +1153,16 @@ static int s_s3_meta_request_error_code_from_response_status(int response_status int error_code = AWS_ERROR_UNKNOWN; switch (response_status) { - case AWS_S3_RESPONSE_STATUS_SUCCESS: - case AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS: - case AWS_S3_RESPONSE_STATUS_NO_CONTENT_SUCCESS: + case AWS_HTTP_STATUS_CODE_200_OK: + case AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT: + case AWS_HTTP_STATUS_CODE_204_NO_CONTENT: error_code = AWS_ERROR_SUCCESS; break; - case AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR: + case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR: error_code = AWS_ERROR_S3_INTERNAL_ERROR; break; - case AWS_S3_RESPONSE_STATUS_SLOW_DOWN: + case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE: + /* S3 response 503 for throttling, slow down the sending */ error_code = AWS_ERROR_S3_SLOW_DOWN; break; default: @@ -899,15 +1204,15 @@ static void s_get_part_response_headers_checksum_helper( aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(i), &encoded_len); if (header_sum.len == encoded_len - 1) { aws_byte_buf_init_copy_from_cursor( - &connection->request->request_level_response_header_checksum, aws_default_allocator(), header_sum); - connection->request->request_level_running_response_sum = aws_checksum_new(aws_default_allocator(), i); + &connection->request->request_level_response_header_checksum, meta_request->allocator, header_sum); + connection->request->request_level_running_response_sum = aws_checksum_new(meta_request->allocator, i); } break; } } } -/* warning this might get screwed up with retrys/restarts */ +/* warning this might get 
screwed up with retries/restarts */ static void s_get_part_response_body_checksum_helper( struct aws_s3_checksum *running_response_sum, const struct aws_byte_cursor *body) { @@ -927,9 +1232,9 @@ static void s_get_response_part_finish_checksum_helper(struct aws_s3_connection size_t encoded_checksum_len = 0; request->did_validate = true; aws_base64_compute_encoded_len(request->request_level_running_response_sum->digest_size, &encoded_checksum_len); - aws_byte_buf_init(&encoded_response_body_sum, aws_default_allocator(), encoded_checksum_len); + aws_byte_buf_init(&encoded_response_body_sum, request->allocator, encoded_checksum_len); aws_byte_buf_init( - &response_body_sum, aws_default_allocator(), request->request_level_running_response_sum->digest_size); + &response_body_sum, request->allocator, request->request_level_running_response_sum->digest_size); aws_checksum_finalize(request->request_level_running_response_sum, &response_body_sum, 0); struct aws_byte_cursor response_body_sum_cursor = aws_byte_cursor_from_buf(&response_body_sum); aws_base64_encode(&response_body_sum_cursor, &encoded_response_body_sum); @@ -939,12 +1244,12 @@ static void s_get_response_part_finish_checksum_helper(struct aws_s3_connection request->validation_algorithm = request->request_level_running_response_sum->algorithm; aws_byte_buf_clean_up(&response_body_sum); aws_byte_buf_clean_up(&encoded_response_body_sum); - aws_checksum_destroy(request->request_level_running_response_sum); - aws_byte_buf_clean_up(&request->request_level_response_header_checksum); - request->request_level_running_response_sum = NULL; } else { request->did_validate = false; } + aws_checksum_destroy(request->request_level_running_response_sum); + aws_byte_buf_clean_up(&request->request_level_response_header_checksum); + request->request_level_running_response_sum = NULL; } static int s_s3_meta_request_incoming_headers( @@ -953,7 +1258,6 @@ static int s_s3_meta_request_incoming_headers( const struct aws_http_header 
*headers, size_t headers_count, void *user_data) { - (void)header_block; AWS_PRECONDITION(stream); @@ -974,12 +1278,31 @@ static int s_s3_meta_request_incoming_headers( (void *)meta_request, (void *)request); } + if (request->send_data.metrics) { + /* Record the headers to the metrics */ + struct aws_s3_request_metrics *s3_metrics = request->send_data.metrics; + if (s3_metrics->req_resp_info_metrics.response_headers == NULL) { + s3_metrics->req_resp_info_metrics.response_headers = aws_http_headers_new(meta_request->allocator); + } + + for (size_t i = 0; i < headers_count; ++i) { + const struct aws_byte_cursor *name = &headers[i].name; + const struct aws_byte_cursor *value = &headers[i].value; + if (aws_byte_cursor_eq(name, &g_request_id_header_name)) { + s3_metrics->req_resp_info_metrics.request_id = + aws_string_new_from_cursor(connection->request->allocator, value); + } + + aws_http_headers_add(s3_metrics->req_resp_info_metrics.response_headers, *name, *value); + } + s3_metrics->req_resp_info_metrics.response_status = request->send_data.response_status; + } bool successful_response = s_s3_meta_request_error_code_from_response_status(request->send_data.response_status) == AWS_ERROR_SUCCESS; if (successful_response && meta_request->checksum_config.validate_response_checksum && - request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART) { + request->request_type == AWS_S3_REQUEST_TYPE_GET_OBJECT) { s_get_part_response_headers_checksum_helper(connection, meta_request, headers, headers_count); } @@ -1002,6 +1325,49 @@ static int s_s3_meta_request_incoming_headers( return AWS_OP_SUCCESS; } +static int s_s3_meta_request_headers_block_done( + struct aws_http_stream *stream, + enum aws_http_header_block header_block, + void *user_data) { + (void)stream; + + if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) { + return AWS_OP_SUCCESS; + } + + struct aws_s3_connection *connection = user_data; + AWS_PRECONDITION(connection); + + struct aws_s3_request *request = 
connection->request; + AWS_PRECONDITION(request); + + struct aws_s3_meta_request *meta_request = request->meta_request; + AWS_PRECONDITION(meta_request); + + /* + * When downloading parts via partNumber, if the size is larger than expected, cancel the request immediately so we + * don't end up downloading more into memory than we can handle. We'll retry the download using ranged gets instead. + */ + if (request->request_type == AWS_S3_REQUEST_TYPE_GET_OBJECT && + request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1) { + uint64_t content_length; + if (!aws_s3_parse_content_length_response_header( + request->allocator, request->send_data.response_headers, &content_length) && + content_length > meta_request->part_size) { + return aws_raise_error(AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE); + } + } + return AWS_OP_SUCCESS; +} + +/* + * Small helper to either do a static or dynamic append. + * TODO: something like this would be useful in common. + */ +static int s_response_body_append(struct aws_byte_buf *buf, const struct aws_byte_cursor *data) { + return buf->allocator != NULL ? 
aws_byte_buf_append_dynamic(buf, data) : aws_byte_buf_append(buf, data); +} + static int s_s3_meta_request_incoming_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, @@ -1026,7 +1392,9 @@ static int s_s3_meta_request_incoming_body( request->send_data.response_status, (uint64_t)data->len, (void *)connection); - if (request->send_data.response_status < 200 || request->send_data.response_status > 299) { + bool successful_response = + s_s3_meta_request_error_code_from_response_status(request->send_data.response_status) == AWS_ERROR_SUCCESS; + if (!successful_response) { AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "response body: \n" PRInSTR "\n", AWS_BYTE_CURSOR_PRI(*data)); } @@ -1035,17 +1403,18 @@ static int s_s3_meta_request_incoming_body( } if (request->send_data.response_body.capacity == 0) { - size_t buffer_size = s_dynamic_body_initial_buf_size; - - if (request->part_size_response_body) { - buffer_size = meta_request->part_size; + if (request->has_part_size_response_body && request->ticket != NULL) { + request->send_data.response_body = + aws_s3_buffer_pool_acquire_buffer(request->meta_request->client->buffer_pool, request->ticket); + } else { + size_t buffer_size = s_dynamic_body_initial_buf_size; + aws_byte_buf_init(&request->send_data.response_body, meta_request->allocator, buffer_size); } - - aws_byte_buf_init(&request->send_data.response_body, meta_request->allocator, buffer_size); } - if (aws_byte_buf_append_dynamic(&request->send_data.response_body, data)) { - + /* Note: not having part sized response body means the buffer is dynamic and + * can grow. 
*/ + if (s_response_body_append(&request->send_data.response_body, data)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Request %p could not append to response body due to error %d (%s)", @@ -1060,14 +1429,59 @@ static int s_s3_meta_request_incoming_body( return AWS_OP_SUCCESS; } +static void s_s3_meta_request_stream_metrics( + struct aws_http_stream *stream, + const struct aws_http_stream_metrics *http_metrics, + void *user_data) { + (void)stream; + struct aws_s3_connection *connection = user_data; + AWS_PRECONDITION(connection); + + struct aws_s3_request *request = connection->request; + AWS_PRECONDITION(request); + AWS_ASSERT(request->send_data.metrics); + struct aws_s3_request_metrics *s3_metrics = request->send_data.metrics; + /* Copy over the time metrics from aws_http_stream_metrics to aws_s3_request_metrics */ + s3_metrics->time_metrics.send_start_timestamp_ns = http_metrics->send_start_timestamp_ns; + s3_metrics->time_metrics.send_end_timestamp_ns = http_metrics->send_end_timestamp_ns; + s3_metrics->time_metrics.sending_duration_ns = http_metrics->sending_duration_ns; + s3_metrics->time_metrics.receive_start_timestamp_ns = http_metrics->receive_start_timestamp_ns; + s3_metrics->time_metrics.receive_end_timestamp_ns = http_metrics->receive_end_timestamp_ns; + s3_metrics->time_metrics.receiving_duration_ns = http_metrics->receiving_duration_ns; + + s3_metrics->crt_info_metrics.stream_id = http_metrics->stream_id; + + /* Also related metrics from the request/response. 
*/ + s3_metrics->crt_info_metrics.connection_id = (void *)connection->http_connection; + const struct aws_socket_endpoint *endpoint = aws_http_connection_get_remote_endpoint(connection->http_connection); + request->send_data.metrics->crt_info_metrics.ip_address = + aws_string_new_from_c_str(request->allocator, endpoint->address); + AWS_ASSERT(request->send_data.metrics->crt_info_metrics.ip_address != NULL); + + s3_metrics->crt_info_metrics.thread_id = aws_thread_current_thread_id(); +} + /* Finish up the processing of the request work. */ static void s_s3_meta_request_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); - if (connection->request->meta_request->checksum_config.validate_response_checksum) { + struct aws_s3_request *request = connection->request; + struct aws_s3_meta_request *meta_request = request->meta_request; + + if (meta_request->checksum_config.validate_response_checksum) { s_get_response_part_finish_checksum_helper(connection, error_code); } + /* BEGIN CRITICAL SECTION */ + { + aws_s3_meta_request_lock_synced_data(meta_request); + if (request->synced_data.cancellable_http_stream) { + aws_linked_list_remove(&request->cancellable_http_streams_list_node); + request->synced_data.cancellable_http_stream = NULL; + } + aws_s3_meta_request_unlock_synced_data(meta_request); + } + /* END CRITICAL SECTION */ s_s3_meta_request_send_request_finish(connection, stream, error_code); } @@ -1089,41 +1503,66 @@ static void s_s3_meta_request_send_request_finish( vtable->send_request_finish(connection, stream, error_code); } -static int s_s3_meta_request_error_code_from_response_body(struct aws_s3_request *request) { +/* Return whether the response to this request might contain an error, even though we got 200 OK. 
+ * see: https://repost.aws/knowledge-center/s3-resolve-200-internalerror */ +static bool s_should_check_for_error_despite_200_OK(const struct aws_s3_request *request) { + /* We handle async error for every request BUT get object. */ + struct aws_s3_meta_request *meta_request = request->meta_request; + if (meta_request->type == AWS_S3_META_REQUEST_TYPE_GET_OBJECT) { + return false; + } + return true; +} + +static int s_s3_meta_request_error_code_from_response(struct aws_s3_request *request) { AWS_PRECONDITION(request); - if (request->send_data.response_body.len == 0) { - /* Empty body is success */ - return AWS_ERROR_SUCCESS; - } - struct aws_byte_cursor response_body_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body); - bool root_name_mismatch = false; - struct aws_string *error_code_string = aws_xml_get_top_level_tag_with_root_name( - request->allocator, &g_code_body_xml_name, &g_error_body_xml_name, &root_name_mismatch, &response_body_cursor); - if (error_code_string == NULL) { - if (root_name_mismatch || aws_last_error() == AWS_ERROR_MALFORMED_INPUT_STRING) { - /* The xml body is not Error, we can safely think the request succeed. */ - aws_reset_error(); - return AWS_ERROR_SUCCESS; - } else { - return aws_last_error(); + + int error_code_from_status = s_s3_meta_request_error_code_from_response_status(request->send_data.response_status); + + /* Response body might be XML with an <Error><Code> inside. + * The is very likely when status-code is bad. + * In some cases, it's even possible after 200 OK. */ + int error_code_from_xml = AWS_ERROR_SUCCESS; + if (error_code_from_status != AWS_ERROR_SUCCESS || s_should_check_for_error_despite_200_OK(request)) { + if (request->send_data.response_body.len > 0) { + /* Attempt to read as XML, it's fine if this fails. 
*/ + struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); + struct aws_byte_cursor error_code_string = {0}; + const char *xml_path[] = {"Error", "Code", NULL}; + if (aws_xml_get_body_at_path(request->allocator, xml_doc, xml_path, &error_code_string) == AWS_OP_SUCCESS) { + /* Found an <Error><Code> string! Map it to CRT error code. */ + error_code_from_xml = aws_s3_crt_error_code_from_server_error_code_string(error_code_string); + } + } + } + + if (error_code_from_status == AWS_ERROR_SUCCESS) { + /* Status-code was OK, so assume everything's good, unless we found an <Error><Code> in the XML */ + switch (error_code_from_xml) { + case AWS_ERROR_SUCCESS: + return AWS_ERROR_SUCCESS; + case AWS_ERROR_UNKNOWN: + return AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR; + default: + return error_code_from_xml; } } else { - /* Check the error code. Map the S3 error code to CRT error code. */ - int error_code = aws_s3_crt_error_code_from_server_error_code_string(error_code_string); - if (error_code == AWS_ERROR_UNKNOWN) { - /* All error besides of internal error from async error are not recoverable from retry for now. 
*/ - error_code = AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR; + /* Return error based on status-code, unless we got something more specific from XML */ + switch (error_code_from_xml) { + case AWS_ERROR_SUCCESS: + return error_code_from_status; + case AWS_ERROR_UNKNOWN: + return error_code_from_status; + default: + return error_code_from_xml; } - aws_string_destroy(error_code_string); - return error_code; } } -static void s_s3_meta_request_send_request_finish_helper( +void aws_s3_meta_request_send_request_finish_default( struct aws_s3_connection *connection, struct aws_http_stream *stream, - int error_code, - bool handle_async_error) { + int error_code) { struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); @@ -1138,12 +1577,7 @@ static void s_s3_meta_request_send_request_finish_helper( /* If our error code is currently success, then we have some other calls to make that could still indicate a * failure. */ if (error_code == AWS_ERROR_SUCCESS) { - if (handle_async_error && response_status == AWS_HTTP_STATUS_CODE_200_OK) { - error_code = s_s3_meta_request_error_code_from_response_body(request); - } else { - error_code = s_s3_meta_request_error_code_from_response_status(response_status); - } - + error_code = s_s3_meta_request_error_code_from_response(request); if (error_code != AWS_ERROR_SUCCESS) { aws_raise_error(error_code); } @@ -1185,29 +1619,51 @@ static void s_s3_meta_request_send_request_finish_helper( /* If the request failed due to an invalid (ie: unrecoverable) response status, or the meta request already * has a result, then make sure that this request isn't retried. 
*/ if (error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS || + error_code == AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE || error_code == AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR || meta_request_finishing) { finish_code = AWS_S3_CONNECTION_FINISH_CODE_FAILED; - - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, - "id=%p Meta request cannot recover from error %d (%s). (request=%p, response status=%d)", - (void *)meta_request, - error_code, - aws_error_str(error_code), - (void *)request, - response_status); + if (error_code == AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE) { + /* Log at info level instead of error as it's expected and not a fatal error */ + AWS_LOGF_INFO( + AWS_LS_S3_META_REQUEST, + "id=%p Cancelling the request because of error %d (%s). (request=%p, response status=%d)", + (void *)meta_request, + error_code, + aws_error_str(error_code), + (void *)request, + response_status); + } else { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Meta request cannot recover from error %d (%s). (request=%p, response status=%d)", + (void *)meta_request, + error_code, + aws_error_str(error_code), + (void *)request, + response_status); + } } else { - - AWS_LOGF_ERROR( - AWS_LS_S3_META_REQUEST, - "id=%p Meta request failed from error %d (%s). (request=%p, response status=%d). Try to setup a " - "retry.", - (void *)meta_request, - error_code, - aws_error_str(error_code), - (void *)request, - response_status); + if (error_code == AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT) { + /* Log at info level instead of error as it's somewhat expected. */ + AWS_LOGF_INFO( + AWS_LS_S3_META_REQUEST, + "id=%p Request failed from error %d (%s). (request=%p). Try to setup a retry.", + (void *)meta_request, + error_code, + aws_error_str(error_code), + (void *)request); + } else { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Request failed from error %d (%s). (request=%p, response status=%d). 
Try to setup a " + "retry.", + (void *)meta_request, + error_code, + aws_error_str(error_code), + (void *)request, + response_status); + } /* Otherwise, set this up for a retry if the meta request is active. */ finish_code = AWS_S3_CONNECTION_FINISH_CODE_RETRY; @@ -1222,20 +1678,6 @@ static void s_s3_meta_request_send_request_finish_helper( aws_s3_client_notify_connection_finished(client, connection, error_code, finish_code); } -void aws_s3_meta_request_send_request_finish_default( - struct aws_s3_connection *connection, - struct aws_http_stream *stream, - int error_code) { - s_s3_meta_request_send_request_finish_helper(connection, stream, error_code, false /*async error*/); -} - -void aws_s3_meta_request_send_request_finish_handle_async_error( - struct aws_s3_connection *connection, - struct aws_http_stream *stream, - int error_code) { - s_s3_meta_request_send_request_finish_helper(connection, stream, error_code, true /*async error*/); -} - void aws_s3_meta_request_finished_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, @@ -1247,12 +1689,6 @@ void aws_s3_meta_request_finished_request( meta_request->vtable->finished_request(meta_request, request, error_code); } -struct s3_stream_response_body_payload { - struct aws_s3_meta_request *meta_request; - struct aws_linked_list requests; - struct aws_task task; -}; - /* Pushes a request into the body streaming priority queue. 
Derived meta request types should not call this--they * should instead call aws_s3_meta_request_stream_response_body_synced.*/ static void s_s3_meta_request_body_streaming_push_synced( @@ -1265,19 +1701,17 @@ static void s_s3_meta_request_body_streaming_push_synced( static struct aws_s3_request *s_s3_meta_request_body_streaming_pop_next_synced( struct aws_s3_meta_request *meta_request); -static void s_s3_meta_request_body_streaming_task(struct aws_task *task, void *arg, enum aws_task_status task_status); +static void s_s3_meta_request_event_delivery_task(struct aws_task *task, void *arg, enum aws_task_status task_status); void aws_s3_meta_request_stream_response_body_synced( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request) { + ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); AWS_PRECONDITION(meta_request); AWS_PRECONDITION(request); AWS_PRECONDITION(request->part_number > 0); - struct aws_linked_list streaming_requests; - aws_linked_list_init(&streaming_requests); - /* Push it into the priority queue. */ s_s3_meta_request_body_streaming_push_synced(meta_request, request); @@ -1285,49 +1719,98 @@ void aws_s3_meta_request_stream_response_body_synced( AWS_PRECONDITION(client); aws_atomic_fetch_add(&client->stats.num_requests_stream_queued_waiting, 1); - /* Grab the next request that can be streamed back to the caller. */ - struct aws_s3_request *next_streaming_request = s_s3_meta_request_body_streaming_pop_next_synced(meta_request); + /* Grab any requests that can be streamed back to the caller + * and send them for delivery on io_event_loop thread. 
*/ uint32_t num_streaming_requests = 0; + struct aws_s3_request *next_streaming_request; + while ((next_streaming_request = s_s3_meta_request_body_streaming_pop_next_synced(meta_request)) != NULL) { + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_RESPONSE_BODY}; + event.u.response_body.completed_request = next_streaming_request; + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); - /* Grab any additional requests that could be streamed to the caller. */ - while (next_streaming_request != NULL) { - aws_atomic_fetch_sub(&client->stats.num_requests_stream_queued_waiting, 1); - - aws_linked_list_push_back(&streaming_requests, &next_streaming_request->node); ++num_streaming_requests; - next_streaming_request = s_s3_meta_request_body_streaming_pop_next_synced(meta_request); } - if (aws_linked_list_empty(&streaming_requests)) { + if (num_streaming_requests == 0) { return; } - aws_atomic_fetch_add(&client->stats.num_requests_streaming, num_streaming_requests); + aws_atomic_fetch_add(&client->stats.num_requests_streaming_response, num_streaming_requests); + aws_atomic_fetch_sub(&client->stats.num_requests_stream_queued_waiting, num_streaming_requests); meta_request->synced_data.num_parts_delivery_sent += num_streaming_requests; +} - struct s3_stream_response_body_payload *payload = - aws_mem_calloc(client->allocator, 1, sizeof(struct s3_stream_response_body_payload)); +void aws_s3_meta_request_add_event_for_delivery_synced( + struct aws_s3_meta_request *meta_request, + const struct aws_s3_meta_request_event *event) { - aws_s3_meta_request_acquire(meta_request); - payload->meta_request = meta_request; + ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); - aws_linked_list_init(&payload->requests); - aws_linked_list_swap_contents(&payload->requests, &streaming_requests); + aws_array_list_push_back(&meta_request->synced_data.event_delivery_array, event); - aws_task_init( - &payload->task, s_s3_meta_request_body_streaming_task, 
payload, "s_s3_meta_request_body_streaming_task"); - aws_event_loop_schedule_task_now(meta_request->io_event_loop, &payload->task); + /* If the array was empty before, schedule task to deliver all events in the array. + * If the array already had things in it, then the task is already scheduled and will run soon. */ + if (aws_array_list_length(&meta_request->synced_data.event_delivery_array) == 1) { + aws_s3_meta_request_acquire(meta_request); + + aws_task_init( + &meta_request->synced_data.event_delivery_task, + s_s3_meta_request_event_delivery_task, + meta_request, + "s3_meta_request_event_delivery"); + aws_event_loop_schedule_task_now(meta_request->io_event_loop, &meta_request->synced_data.event_delivery_task); + } } -static void s_s3_meta_request_body_streaming_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { - (void)task; - (void)task_status; +bool aws_s3_meta_request_are_events_out_for_delivery_synced(struct aws_s3_meta_request *meta_request) { + ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); + return aws_array_list_length(&meta_request->synced_data.event_delivery_array) > 0 || + meta_request->synced_data.event_delivery_active; +} - struct s3_stream_response_body_payload *payload = arg; - AWS_PRECONDITION(payload); +void aws_s3_meta_request_cancel_cancellable_requests_synced(struct aws_s3_meta_request *meta_request, int error_code) { + ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); + while (!aws_linked_list_empty(&meta_request->synced_data.cancellable_http_streams_list)) { + struct aws_linked_list_node *request_node = + aws_linked_list_pop_front(&meta_request->synced_data.cancellable_http_streams_list); + struct aws_s3_request *request = + AWS_CONTAINER_OF(request_node, struct aws_s3_request, cancellable_http_streams_list_node); + AWS_ASSERT(!request->always_send); + + aws_http_stream_cancel(request->synced_data.cancellable_http_stream, error_code); + request->synced_data.cancellable_http_stream = NULL; + } +} + +static struct 
aws_s3_request_metrics *s_s3_request_finish_up_and_release_metrics( + struct aws_s3_request_metrics *metrics, + struct aws_s3_meta_request *meta_request) { + + if (metrics != NULL) { + /* Request is done streaming the body, complete the metrics for the request now. */ + + if (metrics->time_metrics.end_timestamp_ns == -1) { + aws_high_res_clock_get_ticks((uint64_t *)&metrics->time_metrics.end_timestamp_ns); + metrics->time_metrics.total_duration_ns = + metrics->time_metrics.end_timestamp_ns - metrics->time_metrics.start_timestamp_ns; + } + + if (meta_request->telemetry_callback != NULL) { + /* We already in the meta request event thread, invoke the telemetry callback directly */ + meta_request->telemetry_callback(meta_request, metrics, meta_request->user_data); + } + aws_s3_request_metrics_release(metrics); + } + return NULL; +} - struct aws_s3_meta_request *meta_request = payload->meta_request; +/* Deliver events in event_delivery_array. + * This task runs on the meta-request's io_event_loop thread. */ +static void s_s3_meta_request_event_delivery_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { + (void)task; + (void)task_status; + struct aws_s3_meta_request *meta_request = arg; AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); @@ -1337,40 +1820,111 @@ static void s_s3_meta_request_body_streaming_task(struct aws_task *task, void *a /* Client owns this event loop group. A cancel should not be possible. */ AWS_ASSERT(task_status == AWS_TASK_STATUS_RUN_READY); - struct aws_linked_list completed_requests; - aws_linked_list_init(&completed_requests); + /* Swap contents of synced_data.event_delivery_array into this pre-allocated array-list, then process events */ + struct aws_array_list *event_delivery_array = &meta_request->io_threaded_data.event_delivery_array; + AWS_FATAL_ASSERT(aws_array_list_length(event_delivery_array) == 0); + /* If an error occurs, don't fire callbacks anymore. 
*/ int error_code = AWS_ERROR_SUCCESS; - uint32_t num_successful = 0; - uint32_t num_failed = 0; + uint32_t num_parts_delivered = 0; - while (!aws_linked_list_empty(&payload->requests)) { - struct aws_linked_list_node *request_node = aws_linked_list_pop_front(&payload->requests); - struct aws_s3_request *request = AWS_CONTAINER_OF(request_node, struct aws_s3_request, node); - AWS_ASSERT(meta_request == request->meta_request); - struct aws_byte_cursor body_buffer_byte_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body); + /* BEGIN CRITICAL SECTION */ + { + aws_s3_meta_request_lock_synced_data(meta_request); - AWS_ASSERT(request->part_number >= 1); + aws_array_list_swap_contents(event_delivery_array, &meta_request->synced_data.event_delivery_array); + meta_request->synced_data.event_delivery_active = true; - if (aws_s3_meta_request_has_finish_result(meta_request)) { - ++num_failed; - } else { - if (body_buffer_byte_cursor.len > 0 && error_code == AWS_ERROR_SUCCESS && meta_request->body_callback && - meta_request->body_callback( - meta_request, &body_buffer_byte_cursor, request->part_range_start, meta_request->user_data)) { - error_code = aws_last_error_or_unknown(); - } - - if (error_code == AWS_ERROR_SUCCESS) { - ++num_successful; - } else { - ++num_failed; - } + if (aws_s3_meta_request_has_finish_result_synced(meta_request)) { + error_code = AWS_ERROR_S3_CANCELED; } - aws_atomic_fetch_sub(&client->stats.num_requests_streaming, 1); - aws_s3_request_release(request); + aws_s3_meta_request_unlock_synced_data(meta_request); + } + /* END CRITICAL SECTION */ + + /* Deliver all events */ + for (size_t event_i = 0; event_i < aws_array_list_length(event_delivery_array); ++event_i) { + struct aws_s3_meta_request_event event; + aws_array_list_get_at(event_delivery_array, &event, event_i); + switch (event.type) { + + case AWS_S3_META_REQUEST_EVENT_RESPONSE_BODY: { + struct aws_s3_request *request = event.u.response_body.completed_request; + 
AWS_ASSERT(meta_request == request->meta_request); + struct aws_byte_cursor response_body = aws_byte_cursor_from_buf(&request->send_data.response_body); + + AWS_ASSERT(request->part_number >= 1); + + if (error_code == AWS_ERROR_SUCCESS && response_body.len > 0 && meta_request->body_callback != NULL) { + if (meta_request->body_callback( + meta_request, &response_body, request->part_range_start, meta_request->user_data)) { + + error_code = aws_last_error_or_unknown(); + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p Response body callback raised error %d (%s).", + (void *)meta_request, + error_code, + aws_error_str(error_code)); + } + } + aws_atomic_fetch_sub(&client->stats.num_requests_streaming_response, 1); + + ++num_parts_delivered; + request->send_data.metrics = + s_s3_request_finish_up_and_release_metrics(request->send_data.metrics, meta_request); + + aws_s3_request_release(request); + } break; + + case AWS_S3_META_REQUEST_EVENT_PROGRESS: { + if (error_code == AWS_ERROR_SUCCESS && meta_request->progress_callback != NULL) { + /* Don't report 0 byte progress events. + * The reasoning behind this is: + * + * In some code paths, when no data is transferred, there are no progress events, + * but in other code paths there might be one progress event of 0 bytes. + * We want to be consistent, either: + * - REPORT AT LEAST ONCE: even if no data is being transferred. + * This would require finding every code path where no progress events are sent, + * and sending an appropriate progress event, even if it's for 0 bytes. + * One example of ending early is: when resuming a paused upload, + * we do ListParts on the UploadID, and if that 404s we assume the + * previous "paused" meta-request actually completed, + * and so we immediately end the "resuming" meta-request + * as successful without sending any further HTTP requests. 
+ * It would be tough to accurately report progress here because + * we don't know the total size, since we never read the request body, + * and didn't get any info about the previous upload. + * OR + * - NEVER REPORT ZERO BYTES: even if that means no progress events at all. + * This is easy to do. We'd only send progress events when data is transferred, + * and if a 0 byte event slips through somehow, just check before firing the callback. + * Since the NEVER REPORT ZERO BYTES path is simpler to implement, we went with that. */ + if (event.u.progress.info.bytes_transferred > 0) { + meta_request->progress_callback(meta_request, &event.u.progress.info, meta_request->user_data); + } + } + } break; + + case AWS_S3_META_REQUEST_EVENT_TELEMETRY: { + struct aws_s3_request_metrics *metrics = event.u.telemetry.metrics; + AWS_FATAL_ASSERT(meta_request->telemetry_callback != NULL); + AWS_FATAL_ASSERT(metrics != NULL); + + event.u.telemetry.metrics = + s_s3_request_finish_up_and_release_metrics(event.u.telemetry.metrics, meta_request); + } break; + + default: + AWS_FATAL_ASSERT(false); + } } + + /* Done delivering events */ + aws_array_list_clear(event_delivery_array); + /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); @@ -1378,14 +1932,11 @@ static void s_s3_meta_request_body_streaming_task(struct aws_task *task, void *a aws_s3_meta_request_set_fail_synced(meta_request, NULL, error_code); } - meta_request->synced_data.num_parts_delivery_completed += (num_failed + num_successful); - meta_request->synced_data.num_parts_delivery_failed += num_failed; - meta_request->synced_data.num_parts_delivery_succeeded += num_successful; + meta_request->synced_data.num_parts_delivery_completed += num_parts_delivered; + meta_request->synced_data.event_delivery_active = false; aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ - aws_mem_release(client->allocator, payload); - payload = NULL; 
aws_s3_client_schedule_process_work(client); aws_s3_meta_request_release(meta_request); @@ -1449,6 +2000,8 @@ void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request struct aws_linked_list release_request_list; aws_linked_list_init(&release_request_list); + struct aws_future_void *pending_async_write_future = NULL; + struct aws_s3_meta_request_result finish_result; AWS_ZERO_STRUCT(finish_result); @@ -1472,6 +2025,10 @@ void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request aws_linked_list_push_back(&release_request_list, &request->node); } + /* Clean out any pending async-write future */ + pending_async_write_future = meta_request->synced_data.async_write.future; + meta_request->synced_data.async_write.future = NULL; + finish_result = meta_request->synced_data.finish_result; AWS_ZERO_STRUCT(meta_request->synced_data.finish_result); @@ -1484,10 +2041,23 @@ void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request return; } + if (pending_async_write_future != NULL) { + AWS_LOGF_TRACE( + AWS_LS_S3_META_REQUEST, + "id=%p: write future complete due to meta request's early finish", + (void *)meta_request); + aws_future_void_set_error(pending_async_write_future, AWS_ERROR_S3_REQUEST_HAS_COMPLETED); + pending_async_write_future = aws_future_void_release(pending_async_write_future); + } + while (!aws_linked_list_empty(&release_request_list)) { struct aws_linked_list_node *request_node = aws_linked_list_pop_front(&release_request_list); struct aws_s3_request *release_request = AWS_CONTAINER_OF(request_node, struct aws_s3_request, node); AWS_FATAL_ASSERT(release_request != NULL); + /* This pending-body-streaming request was never moved to the event-delivery queue, + * so its metrics were never finished. Finish them now. 
*/ + release_request->send_data.metrics = + s_s3_request_finish_up_and_release_metrics(release_request->send_data.metrics, meta_request); aws_s3_request_release(release_request); } @@ -1509,13 +2079,13 @@ void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request finish_result.error_code, aws_error_str(finish_result.error_code)); - /* As the meta request has been finished with any HTTP message, we can safely release the http message that hold. So - * that, the downstream high level language doesn't need to wait for shutdown to clean related resource (eg: input - * stream) */ - if (meta_request->initial_request_message) { - aws_http_message_release(meta_request->initial_request_message); - meta_request->initial_request_message = NULL; - } + /* As the meta request has been finished with any HTTP message, we can safely release the http message that + * hold. So that, the downstream high level language doesn't need to wait for shutdown to clean related resource + * (eg: input stream) */ + meta_request->request_body_async_stream = aws_async_input_stream_release(meta_request->request_body_async_stream); + meta_request->request_body_parallel_stream = + aws_parallel_input_stream_release(meta_request->request_body_parallel_stream); + meta_request->initial_request_message = aws_http_message_release(meta_request->initial_request_message); if (meta_request->finish_callback != NULL) { meta_request->finish_callback(meta_request, &finish_result, meta_request->user_data); @@ -1529,41 +2099,88 @@ void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request meta_request->io_event_loop = NULL; } -int aws_s3_meta_request_read_body(struct aws_s3_meta_request *meta_request, struct aws_byte_buf *buffer) { +struct aws_future_bool *aws_s3_meta_request_read_body( + struct aws_s3_meta_request *meta_request, + uint64_t offset, + struct aws_byte_buf *buffer) { + AWS_PRECONDITION(meta_request); AWS_PRECONDITION(buffer); - struct aws_input_stream 
*initial_body_stream = + /* If async-stream, simply call read_to_fill() */ + if (meta_request->request_body_async_stream != NULL) { + return aws_async_input_stream_read_to_fill(meta_request->request_body_async_stream, buffer); + } + + /* If parallel-stream, simply call read(), which must fill the buffer and/or EOF */ + if (meta_request->request_body_parallel_stream != NULL) { + return aws_parallel_input_stream_read(meta_request->request_body_parallel_stream, offset, buffer); + } + + /* Further techniques are synchronous... */ + struct aws_future_bool *synchronous_read_future = aws_future_bool_new(meta_request->allocator); + + /* If using async-writes, call function which fills the buffer and/or hits EOF */ + if (meta_request->request_body_using_async_writes == true) { + bool eof = false; + if (s_s3_meta_request_read_from_pending_async_writes(meta_request, buffer, &eof) == AWS_OP_SUCCESS) { + aws_future_bool_set_result(synchronous_read_future, eof); + } else { + aws_future_bool_set_error(synchronous_read_future, aws_last_error()); + } + return synchronous_read_future; + } + + /* Else synchronous aws_input_stream */ + struct aws_input_stream *synchronous_stream = aws_http_message_get_body_stream(meta_request->initial_request_message); - AWS_FATAL_ASSERT(initial_body_stream); + AWS_FATAL_ASSERT(synchronous_stream); + + /* Keep calling read() until we fill the buffer, or hit EOF */ + struct aws_stream_status status = {.is_end_of_stream = false, .is_valid = true}; + while ((buffer->len < buffer->capacity) && !status.is_end_of_stream) { + /* Read from stream */ + if (aws_input_stream_read(synchronous_stream, buffer) != AWS_OP_SUCCESS) { + aws_future_bool_set_error(synchronous_read_future, aws_last_error()); + goto synchronous_read_done; + } - /* Copy it into our buffer. 
*/ - if (aws_input_stream_read(initial_body_stream, buffer)) { - AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "id=%p Could not read from body stream.", (void *)meta_request); - return AWS_OP_ERR; + /* Check if stream is done */ + if (aws_input_stream_get_status(synchronous_stream, &status) != AWS_OP_SUCCESS) { + aws_future_bool_set_error(synchronous_read_future, aws_last_error()); + goto synchronous_read_done; + } } - return AWS_OP_SUCCESS; + aws_future_bool_set_result(synchronous_read_future, status.is_end_of_stream); + +synchronous_read_done: + return synchronous_read_future; } void aws_s3_meta_request_result_setup( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_result *result, - struct aws_s3_request *request, + struct aws_s3_request *failed_request, int response_status, int error_code) { - if (request != NULL) { - if (request->send_data.response_headers != NULL) { - result->error_response_headers = request->send_data.response_headers; + if (failed_request != NULL) { + if (failed_request->send_data.response_headers != NULL) { + result->error_response_headers = failed_request->send_data.response_headers; aws_http_headers_acquire(result->error_response_headers); } - if (request->send_data.response_body.capacity > 0) { + if (failed_request->send_data.response_body.capacity > 0) { result->error_response_body = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_byte_buf)); aws_byte_buf_init_copy( - result->error_response_body, meta_request->allocator, &request->send_data.response_body); + result->error_response_body, meta_request->allocator, &failed_request->send_data.response_body); + } + + if (failed_request->operation_name != NULL) { + result->error_response_operation_name = + aws_string_new_from_string(meta_request->allocator, failed_request->operation_name); } } @@ -1571,6 +2188,170 @@ void aws_s3_meta_request_result_setup( result->error_code = error_code; } +struct aws_future_void *aws_s3_meta_request_write( + struct 
aws_s3_meta_request *meta_request, + struct aws_byte_cursor data, + bool eof) { + + struct aws_future_void *write_future = aws_future_void_new(meta_request->allocator); + + /* Set this true, while lock is held, if we're ready to send data */ + bool ready_to_send = false; + + /* Set this true, while lock is held, if write() was called illegally + * and the meta-request should terminate */ + bool illegal_usage_terminate_meta_request = false; + + /* BEGIN CRITICAL SECTION */ + aws_s3_meta_request_lock_synced_data(meta_request); + + if (aws_s3_meta_request_has_finish_result_synced(meta_request)) { + /* The meta-request is already complete */ + AWS_LOGF_DEBUG( + AWS_LS_S3_META_REQUEST, + "id=%p: Ignoring write(), the meta request is already complete.", + (void *)meta_request); + aws_future_void_set_error(write_future, AWS_ERROR_S3_REQUEST_HAS_COMPLETED); + + } else if (!meta_request->request_body_using_async_writes) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Illegal call to write(). The meta-request must be configured to send-using-data-writes.", + (void *)meta_request); + illegal_usage_terminate_meta_request = true; + + } else if (meta_request->synced_data.async_write.future != NULL) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "id=%p: Illegal call to write(). The previous write is not complete.", + (void *)meta_request); + illegal_usage_terminate_meta_request = true; + + } else if (meta_request->synced_data.async_write.eof) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, "id=%p: Illegal call to write(). EOF already set.", (void *)meta_request); + illegal_usage_terminate_meta_request = true; + + } else if (eof || (meta_request->synced_data.async_write.buffered_data.len + data.len >= meta_request->part_size)) { + /* This write makes us ready to send (EOF, or we have enough data now to send at least 1 part) */ + AWS_LOGF_TRACE( + AWS_LS_S3_META_REQUEST, + "id=%p: write(data=%zu, eof=%d) previously-buffered=%zu. 
Ready to upload part...", + (void *)meta_request, + data.len, + eof, + meta_request->synced_data.async_write.buffered_data.len); + + meta_request->synced_data.async_write.unbuffered_cursor = data; + meta_request->synced_data.async_write.eof = eof; + meta_request->synced_data.async_write.future = aws_future_void_acquire(write_future); + ready_to_send = true; + + } else { + /* Can't send yet. Buffer the data and complete its future, so we can get more data */ + AWS_LOGF_TRACE( + AWS_LS_S3_META_REQUEST, + "id=%p: write(data=%zu, eof=%d) previously-buffered=%zu. Buffering data, not enough to upload.", + (void *)meta_request, + data.len, + eof, + meta_request->synced_data.async_write.buffered_data.len); + + /* TODO: something smarter with this buffer: like get it from buffer-pool, + * or reserve exactly part-size, or reserve exactly how much we need */ + aws_byte_buf_append_dynamic(&meta_request->synced_data.async_write.buffered_data, &data); + + /* TODO: does a future that completes immediately risk stack overflow? + * If a user does tiny writes, and registers callbacks on the write-future, + * they'll fire synchronously. If the user repeats, the stack will just grow and grow. */ + aws_future_void_set_result(write_future); + } + + if (illegal_usage_terminate_meta_request) { + aws_future_void_set_error(write_future, AWS_ERROR_INVALID_STATE); + aws_s3_meta_request_set_fail_synced(meta_request, NULL, AWS_ERROR_INVALID_STATE); + } + + aws_s3_meta_request_unlock_synced_data(meta_request); + /* END CRITICAL SECTION */ + + if (ready_to_send || illegal_usage_terminate_meta_request) { + /* Schedule the work task, to continue processing the meta-request */ + aws_s3_client_schedule_process_work(meta_request->client); + } + + return write_future; +} + +/* Copy pending async-write data into the buffer. + * This is only called when there's enough data for the next part. 
*/ +static int s_s3_meta_request_read_from_pending_async_writes( + struct aws_s3_meta_request *meta_request, + struct aws_byte_buf *dest, + bool *eof) { + + *eof = false; + + struct aws_future_void *write_future_to_complete = NULL; + int error_code = 0; + + /* BEGIN CRITICAL SECTION */ + aws_s3_meta_request_lock_synced_data(meta_request); + + /* If user calls aws_s3_meta_request_cancel(), it will synchronously complete any pending async-writes. + * So if the write-future is unexpectedly gone, that's what happened, don't touch the data. */ + if (meta_request->synced_data.async_write.future == NULL) { + error_code = AWS_ERROR_S3_CANCELED; + goto unlock; + } + + /* Buffered data should not exceed part-size */ + AWS_FATAL_ASSERT(dest->capacity - dest->len >= meta_request->synced_data.async_write.buffered_data.len); + + /* Copy all buffered data */ + aws_byte_buf_write_from_whole_buffer(dest, meta_request->synced_data.async_write.buffered_data); + meta_request->synced_data.async_write.buffered_data.len = 0; + + /* Copy as much unbuffered data as possible */ + aws_byte_buf_write_to_capacity(dest, &meta_request->synced_data.async_write.unbuffered_cursor); + + /* We should have filled the dest buffer, unless this is the final write */ + AWS_FATAL_ASSERT(dest->len == dest->capacity || meta_request->synced_data.async_write.eof); + + /* If we haven't received EOF, and there's not enough data in unbuffered_cursor to fill another part, + * then we need to move it into buffered_data, so we can complete the write's future and get more data */ + if (!meta_request->synced_data.async_write.eof && + meta_request->synced_data.async_write.unbuffered_cursor.len < meta_request->part_size) { + + aws_byte_buf_append_dynamic( + &meta_request->synced_data.async_write.buffered_data, + &meta_request->synced_data.async_write.unbuffered_cursor); + meta_request->synced_data.async_write.unbuffered_cursor.len = 0; + } + + /* If all unbuffered data is consumed (we sent it, or buffered it) then 
complete the write's future */ + if (meta_request->synced_data.async_write.unbuffered_cursor.len == 0) { + write_future_to_complete = meta_request->synced_data.async_write.future; + meta_request->synced_data.async_write.future = NULL; + + if (meta_request->synced_data.async_write.eof) { + *eof = true; + } + } +unlock: + aws_s3_meta_request_unlock_synced_data(meta_request); + /* END CRITICAL SECTION */ + + /* Don't hold locks while completing the future, it might trigger a user callback */ + if (write_future_to_complete != NULL) { + AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p: write future complete", (void *)meta_request); + aws_future_void_set_result(write_future_to_complete); + aws_future_void_release(write_future_to_complete); + } + + return error_code == 0 ? AWS_OP_SUCCESS : aws_raise_error(error_code); +} + void aws_s3_meta_request_result_clean_up( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_result *result) { @@ -1584,6 +2365,8 @@ void aws_s3_meta_request_result_clean_up( aws_mem_release(meta_request->allocator, result->error_response_body); } + aws_string_destroy(result->error_response_operation_name); + AWS_ZERO_STRUCT(*result); } diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c b/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c index e42845d87f2..57e2fb7312c 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c @@ -158,8 +158,9 @@ struct aws_s3_paginated_operation *aws_s3_paginated_operation_new( aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_paginated_operation)); operation->allocator = allocator; - operation->result_xml_node_name = aws_string_new_from_cursor(allocator, params->result_xml_node_name); - operation->continuation_xml_node_name = aws_string_new_from_cursor(allocator, params->continuation_token_node_name); + operation->result_xml_node_name = aws_string_new_from_cursor(allocator, ¶ms->result_xml_node_name); + 
operation->continuation_xml_node_name = + aws_string_new_from_cursor(allocator, ¶ms->continuation_token_node_name); operation->next_http_message = params->next_message; operation->on_result_node_encountered = params->on_result_node_encountered_fn; @@ -247,54 +248,53 @@ struct parser_wrapper { bool has_more_results; }; -static bool s_on_result_node_encountered(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) { +static int s_on_result_node_encountered(struct aws_xml_node *node, void *user_data) { struct parser_wrapper *wrapper = user_data; - struct aws_byte_cursor node_name; - aws_xml_node_get_name(node, &node_name); + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_byte_cursor continuation_name_val = aws_byte_cursor_from_string(wrapper->operation->continuation_xml_node_name); if (aws_byte_cursor_eq_ignore_case(&node_name, &continuation_name_val)) { struct aws_byte_cursor continuation_token_cur; - bool ret_val = aws_xml_node_as_body(parser, node, &continuation_token_cur) == AWS_OP_SUCCESS; - - if (ret_val) { - wrapper->next_continuation_token = - aws_string_new_from_cursor(wrapper->operation->allocator, &continuation_token_cur); + if (aws_xml_node_as_body(node, &continuation_token_cur) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; } - return ret_val; + wrapper->next_continuation_token = + aws_string_new_from_cursor(wrapper->operation->allocator, &continuation_token_cur); + + return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "IsTruncated")) { struct aws_byte_cursor truncated_cur; - bool ret_val = aws_xml_node_as_body(parser, node, &truncated_cur) == AWS_OP_SUCCESS; + if (aws_xml_node_as_body(node, &truncated_cur) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; + } - if (ret_val) { - if (aws_byte_cursor_eq_c_str_ignore_case(&truncated_cur, "true")) { - wrapper->has_more_results = true; - } + if (aws_byte_cursor_eq_c_str_ignore_case(&truncated_cur, "true")) { + wrapper->has_more_results = true; } - 
return ret_val; + return AWS_OP_SUCCESS; } - return wrapper->operation->on_result_node_encountered(parser, node, wrapper->operation->user_data); + return wrapper->operation->on_result_node_encountered(node, wrapper->operation->user_data); } -static bool s_on_root_node_encountered(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) { +static int s_on_root_node_encountered(struct aws_xml_node *node, void *user_data) { struct parser_wrapper *wrapper = user_data; - struct aws_byte_cursor node_name; - aws_xml_node_get_name(node, &node_name); + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_byte_cursor result_name_val = aws_byte_cursor_from_string(wrapper->operation->result_xml_node_name); if (aws_byte_cursor_eq_ignore_case(&node_name, &result_name_val)) { - return aws_xml_node_traverse(parser, node, s_on_result_node_encountered, wrapper); + return aws_xml_node_traverse(node, s_on_result_node_encountered, wrapper); } - return false; + /* root element not what we expected */ + return aws_raise_error(AWS_ERROR_INVALID_XML); } static void s_on_request_finished( @@ -355,25 +355,26 @@ int aws_s3_paginated_operation_on_response( struct aws_string **continuation_token_out, bool *has_more_results_out) { - struct aws_xml_parser_options parser_options = { - .doc = *response_body, - .max_depth = 16U, - }; - struct parser_wrapper wrapper = {.operation = operation}; /* we've got a full xml document now and the request succeeded, parse the document and fire all the callbacks * for each object and prefix. All of that happens in these three lines. 
*/ - struct aws_xml_parser *parser = aws_xml_parser_new(operation->allocator, &parser_options); - int error_code = aws_xml_parser_parse(parser, s_on_root_node_encountered, &wrapper); - aws_xml_parser_destroy(parser); - - if (error_code == AWS_OP_SUCCESS) { - *continuation_token_out = wrapper.next_continuation_token; - *has_more_results_out = wrapper.has_more_results; + struct aws_xml_parser_options parser_options = { + .doc = *response_body, + .max_depth = 16U, + .on_root_encountered = s_on_root_node_encountered, + .user_data = &wrapper, + }; + if (aws_xml_parse(operation->allocator, &parser_options) != AWS_OP_SUCCESS) { + aws_string_destroy(wrapper.next_continuation_token); + *continuation_token_out = NULL; + *has_more_results_out = false; + return AWS_OP_ERR; } - return error_code; + *continuation_token_out = wrapper.next_continuation_token; + *has_more_results_out = wrapper.has_more_results; + return AWS_OP_SUCCESS; } int aws_s3_construct_next_paginated_request_http_message( diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_parallel_input_stream.c b/contrib/restricted/aws/aws-c-s3/source/s3_parallel_input_stream.c new file mode 100644 index 00000000000..461525762c5 --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/source/s3_parallel_input_stream.c @@ -0,0 +1,140 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include "aws/s3/private/s3_parallel_input_stream.h" + +#include <aws/common/file.h> + +#include <aws/io/future.h> +#include <aws/io/stream.h> + +#include <errno.h> + +void aws_parallel_input_stream_init_base( + struct aws_parallel_input_stream *stream, + struct aws_allocator *alloc, + const struct aws_parallel_input_stream_vtable *vtable, + void *impl) { + + AWS_ZERO_STRUCT(*stream); + stream->alloc = alloc; + stream->vtable = vtable; + stream->impl = impl; + aws_ref_count_init(&stream->ref_count, stream, (aws_simple_completion_callback *)vtable->destroy); +} + +struct aws_parallel_input_stream *aws_parallel_input_stream_acquire(struct aws_parallel_input_stream *stream) { + if (stream != NULL) { + aws_ref_count_acquire(&stream->ref_count); + } + return stream; +} + +struct aws_parallel_input_stream *aws_parallel_input_stream_release(struct aws_parallel_input_stream *stream) { + if (stream != NULL) { + aws_ref_count_release(&stream->ref_count); + } + return NULL; +} + +struct aws_future_bool *aws_parallel_input_stream_read( + struct aws_parallel_input_stream *stream, + uint64_t offset, + struct aws_byte_buf *dest) { + /* Ensure the buffer has space available */ + if (dest->len == dest->capacity) { + struct aws_future_bool *future = aws_future_bool_new(stream->alloc); + aws_future_bool_set_error(future, AWS_ERROR_SHORT_BUFFER); + return future; + } + struct aws_future_bool *future = stream->vtable->read(stream, offset, dest); + return future; +} + +struct aws_parallel_input_stream_from_file_impl { + struct aws_parallel_input_stream base; + + struct aws_string *file_path; +}; + +static void s_para_from_file_destroy(struct aws_parallel_input_stream *stream) { + struct aws_parallel_input_stream_from_file_impl *impl = stream->impl; + + aws_string_destroy(impl->file_path); + + aws_mem_release(stream->alloc, impl); +} + +struct aws_future_bool *s_para_from_file_read( + struct aws_parallel_input_stream *stream, + uint64_t offset, + struct aws_byte_buf *dest) { + + 
struct aws_future_bool *future = aws_future_bool_new(stream->alloc); + struct aws_parallel_input_stream_from_file_impl *impl = stream->impl; + bool success = false; + struct aws_input_stream *file_stream = NULL; + struct aws_stream_status status = { + .is_end_of_stream = false, + .is_valid = true, + }; + + file_stream = aws_input_stream_new_from_file(stream->alloc, aws_string_c_str(impl->file_path)); + if (!file_stream) { + goto done; + } + + if (aws_input_stream_seek(file_stream, offset, AWS_SSB_BEGIN)) { + goto done; + } + /* Keep reading until fill the buffer. + * Note that we must read() after seek() to determine if we're EOF, the seek alone won't trigger it. */ + while ((dest->len < dest->capacity) && !status.is_end_of_stream) { + /* Read from stream */ + if (aws_input_stream_read(file_stream, dest) != AWS_OP_SUCCESS) { + goto done; + } + + /* Check if stream is done */ + if (aws_input_stream_get_status(file_stream, &status) != AWS_OP_SUCCESS) { + goto done; + } + } + success = true; +done: + if (success) { + aws_future_bool_set_result(future, status.is_end_of_stream); + } else { + aws_future_bool_set_error(future, aws_last_error()); + } + + aws_input_stream_release(file_stream); + + return future; +} + +static struct aws_parallel_input_stream_vtable s_parallel_input_stream_from_file_vtable = { + .destroy = s_para_from_file_destroy, + .read = s_para_from_file_read, +}; + +struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file( + struct aws_allocator *allocator, + struct aws_byte_cursor file_name) { + + struct aws_parallel_input_stream_from_file_impl *impl = + aws_mem_calloc(allocator, 1, sizeof(struct aws_parallel_input_stream_from_file_impl)); + aws_parallel_input_stream_init_base(&impl->base, allocator, &s_parallel_input_stream_from_file_vtable, impl); + impl->file_path = aws_string_new_from_cursor(allocator, &file_name); + if (!aws_path_exists(impl->file_path)) { + /* If file path not exists, raise error from errno. 
*/ + aws_translate_and_raise_io_error(errno); + goto error; + } + return &impl->base; +error: + s_para_from_file_destroy(&impl->base); + return NULL; +} diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_platform_info.c b/contrib/restricted/aws/aws-c-s3/source/s3_platform_info.c new file mode 100644 index 00000000000..05deedd9fe8 --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/source/s3_platform_info.c @@ -0,0 +1,614 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include <aws/auth/aws_imds_client.h> +#include <aws/common/clock.h> +#include <aws/common/condition_variable.h> +#include <aws/common/hash_table.h> +#include <aws/common/mutex.h> +#include <aws/common/system_info.h> +#include <aws/io/channel_bootstrap.h> +#include <aws/io/event_loop.h> +#include <aws/io/host_resolver.h> +#include <aws/s3/private/s3_platform_info.h> + +/**** Configuration info for the c5n.18xlarge *****/ +static struct aws_byte_cursor s_c5n_nic_array[] = {AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0")}; + +static struct aws_s3_cpu_group_info s_c5n_18xlarge_cpu_group_info_array[] = { + { + .cpu_group = 0u, + .nic_name_array = s_c5n_nic_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_c5n_nic_array), + .cpus_in_group = 36, + }, + { + .cpu_group = 1u, + .nic_name_array = NULL, + .nic_name_array_length = 0u, + .cpus_in_group = 36, + }, +}; + +static struct aws_s3_platform_info s_c5n_18xlarge_platform_info = { + .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("c5n.18xlarge"), + .max_throughput_gbps = 100u, + .cpu_group_info_array = s_c5n_18xlarge_cpu_group_info_array, + .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_c5n_18xlarge_cpu_group_info_array), + /** not yet **/ + .has_recommended_configuration = false, +}; + +static struct aws_s3_platform_info s_c5n_metal_platform_info = { + .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("c5n.metal"), + .max_throughput_gbps = 100u, + 
.cpu_group_info_array = s_c5n_18xlarge_cpu_group_info_array, + .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_c5n_18xlarge_cpu_group_info_array), + /** not yet **/ + .has_recommended_configuration = false, +}; + +/****** End c5n.18xlarge *****/ + +/****** Begin c5n.large ******/ +static struct aws_s3_cpu_group_info s_c5n_9xlarge_cpu_group_info_array[] = { + { + .cpu_group = 0u, + .nic_name_array = s_c5n_nic_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_c5n_nic_array), + .cpus_in_group = 36, + }, +}; + +static struct aws_s3_platform_info s_c5n_9xlarge_platform_info = { + .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("c5n.9xlarge"), + .max_throughput_gbps = 50u, + .cpu_group_info_array = s_c5n_9xlarge_cpu_group_info_array, + .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_c5n_9xlarge_cpu_group_info_array), + /** not yet **/ + .has_recommended_configuration = false, +}; + +/****** End c5n.9large *****/ + +/***** Begin p4d.24xlarge and p4de.24xlarge ****/ +static struct aws_byte_cursor s_p4d_socket1_array[] = { + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth1"), +}; + +static struct aws_byte_cursor s_p4d_socket2_array[] = { + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth2"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth3"), +}; + +static struct aws_s3_cpu_group_info s_p4d_cpu_group_info_array[] = { + { + .cpu_group = 0u, + .nic_name_array = s_p4d_socket1_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_p4d_socket1_array), + .cpus_in_group = 48, + }, + { + .cpu_group = 1u, + .nic_name_array = s_p4d_socket2_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_p4d_socket1_array), + .cpus_in_group = 48, + }, +}; + +static struct aws_s3_platform_info s_p4d_platform_info = { + .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("p4d.24xlarge"), + .max_throughput_gbps = 400u, + .cpu_group_info_array = s_p4d_cpu_group_info_array, + .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_p4d_cpu_group_info_array), + 
.has_recommended_configuration = true, +}; + +static struct aws_s3_platform_info s_p4de_platform_info = { + .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("p4de.24xlarge"), + .max_throughput_gbps = 400u, + .cpu_group_info_array = s_p4d_cpu_group_info_array, + .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_p4d_cpu_group_info_array), + .has_recommended_configuration = true, +}; + +/***** End p4d.24xlarge and p4de.24xlarge ****/ + +/***** Begin p5.48xlarge ******/ + +/* note: the p5 is a stunningly massive instance type. + * While the specs have 3.2 TB/s for the network bandwidth + * not all of that is accessible from the CPU. From the CPU we'll + * be able to get around 400 Gbps. Also note, 3.2 TB/s + * with 2 sockets on a nitro instance inplies 16 NICs + * per node. However, practically, due to the topology of this instance + * as far as this client is concerned, there are two NICs per node, similar + * to the p4d. The rest is for other things on the machine to use. */ + +struct aws_byte_cursor s_p5_socket1_array[] = { + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth1"), +}; + +static struct aws_byte_cursor s_p5_socket2_array[] = { + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth2"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth3"), +}; + +static struct aws_s3_cpu_group_info s_p5_cpu_group_info_array[] = { + { + .cpu_group = 0u, + .nic_name_array = s_p5_socket1_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_p5_socket1_array), + .cpus_in_group = 96, + }, + { + .cpu_group = 1u, + .nic_name_array = s_p5_socket2_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_p5_socket2_array), + .cpus_in_group = 96, + }, +}; + +struct aws_s3_platform_info s_p5_platform_info = { + .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("p5.48xlarge"), + .max_throughput_gbps = 400u, + .cpu_group_info_array = s_p5_cpu_group_info_array, + .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_p5_cpu_group_info_array), + 
.has_recommended_configuration = true, +}; + +/***** End p5.48xlarge *****/ + +/**** Begin trn1_32_large *****/ +struct aws_byte_cursor s_trn1_n_socket1_array[] = { + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth1"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth2"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth3"), + +}; + +static struct aws_byte_cursor s_trn1_n_socket2_array[] = { + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth4"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth5"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth6"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth7"), +}; + +static struct aws_s3_cpu_group_info s_trn1_n_cpu_group_info_array[] = { + { + .cpu_group = 0u, + .nic_name_array = s_trn1_n_socket1_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_trn1_n_socket1_array), + .cpus_in_group = 64, + }, + { + .cpu_group = 1u, + .nic_name_array = s_trn1_n_socket2_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_trn1_n_socket2_array), + .cpus_in_group = 64, + }, +}; + +static struct aws_s3_platform_info s_trn1_n_platform_info = { + .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("trn1n.32xlarge"), + /* not all of the advertised 1600 Gbps bandwidth can be hit from the cpu in user-space */ + .max_throughput_gbps = 800, + .cpu_group_info_array = s_trn1_n_cpu_group_info_array, + .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_trn1_n_cpu_group_info_array), + .has_recommended_configuration = true, +}; + +struct aws_byte_cursor s_trn1_socket1_array[] = { + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth1"), +}; + +static struct aws_byte_cursor s_trn1_socket2_array[] = { + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth3"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth4"), +}; + +static struct aws_s3_cpu_group_info s_trn1_cpu_group_info_array[] = { + { + .cpu_group = 0u, + .nic_name_array = s_trn1_socket1_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_trn1_socket1_array), + 
.cpus_in_group = 64, + }, + { + .cpu_group = 1u, + .nic_name_array = s_trn1_socket2_array, + .nic_name_array_length = AWS_ARRAY_SIZE(s_trn1_socket2_array), + .cpus_in_group = 64, + }, +}; + +static struct aws_s3_platform_info s_trn1_platform_info = { + .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("trn1.32xlarge"), + /* not all of the advertised 800 Gbps bandwidth can be hit from the cpu in user-space */ + .max_throughput_gbps = 600, + .cpu_group_info_array = s_trn1_cpu_group_info_array, + .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_trn1_cpu_group_info_array), + .has_recommended_configuration = true, +}; + +/**** End trn1.x32_large ******/ + +struct aws_s3_platform_info_loader { + struct aws_allocator *allocator; + struct aws_ref_count ref_count; + struct { + struct aws_string *detected_instance_type; + struct aws_s3_platform_info current_env_platform_info; + /* aws_hash_table<aws_byte_cursor*, aws_s3_platform_info *> + * the table does not "own" any of the data inside it. */ + struct aws_hash_table compute_platform_info_table; + struct aws_mutex lock; + } lock_data; + struct aws_system_environment *current_env; +}; + +void s_add_platform_info_to_table(struct aws_s3_platform_info_loader *loader, struct aws_s3_platform_info *info) { + AWS_PRECONDITION(info->instance_type.len > 0); + AWS_LOGF_TRACE( + AWS_LS_S3_GENERAL, + "id=%p: adding platform entry for \"" PRInSTR "\".", + (void *)loader, + AWS_BYTE_CURSOR_PRI(info->instance_type)); + + struct aws_hash_element *platform_info_element = NULL; + aws_hash_table_find(&loader->lock_data.compute_platform_info_table, &info->instance_type, &platform_info_element); + if (platform_info_element) { + AWS_LOGF_TRACE( + AWS_LS_S3_GENERAL, + "id=%p: existing entry for \"" PRInSTR "\" found, syncing the values.", + (void *)loader, + AWS_BYTE_CURSOR_PRI(info->instance_type)); + + /* detected runtime NIC data is better than the pre-known config data but we don't always have it, + * so copy over any better info than we 
have. Assume if info has NIC data, it was discovered at runtime. + * The other data should be identical and we don't want to add complications to the memory model. + * You're guaranteed only one instance of an instance type's info, the initial load is static memory */ + struct aws_s3_platform_info *existing = platform_info_element->value; + // TODO: sync the cpu group and NIC data + info->has_recommended_configuration = existing->has_recommended_configuration; + /* always prefer a pre-known bandwidth, as we estimate low on EC2 by default for safety. */ + info->max_throughput_gbps = existing->max_throughput_gbps; + } else { + AWS_FATAL_ASSERT( + !aws_hash_table_put( + &loader->lock_data.compute_platform_info_table, &info->instance_type, (void *)info, NULL) && + "hash table put failed!"); + } +} + +static void s_destroy_loader(void *arg) { + struct aws_s3_platform_info_loader *loader = arg; + + aws_hash_table_clean_up(&loader->lock_data.compute_platform_info_table); + aws_mutex_clean_up(&loader->lock_data.lock); + + if (loader->lock_data.detected_instance_type) { + aws_string_destroy(loader->lock_data.detected_instance_type); + } + + aws_system_environment_release(loader->current_env); + aws_mem_release(loader->allocator, loader); +} + +struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_new(struct aws_allocator *allocator) { + struct aws_s3_platform_info_loader *loader = + aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_platform_info_loader)); + + loader->allocator = allocator; + loader->current_env = aws_system_environment_load(allocator); + AWS_FATAL_ASSERT(loader->current_env && "Failed to load system environment"); + aws_mutex_init(&loader->lock_data.lock); + aws_ref_count_init(&loader->ref_count, loader, s_destroy_loader); + + /* TODO: Implement runtime CPU information retrieval from the system. Currently, Valgrind detects a memory leak + * associated with the g_numa_node_of_cpu_ptr function (see: https://github.com/numactl/numactl/issues/3). 
This + * issue was addressed in version v2.0.13 of libnuma (see: https://github.com/numactl/numactl/pull/43). However, + * Amazon Linux 2 defaults to libnuma version v2.0.9, which lacks this fix. We need to suppress this + * warning as a false positive in older versions of libnuma. In the future, however, we will probably eliminate the + * use of numactl altogether. */ + + AWS_FATAL_ASSERT( + !aws_hash_table_init( + &loader->lock_data.compute_platform_info_table, + allocator, + 32, + aws_hash_byte_cursor_ptr_ignore_case, + (aws_hash_callback_eq_fn *)aws_byte_cursor_eq_ignore_case, + NULL, + NULL) && + "Hash table init failed!"); + + s_add_platform_info_to_table(loader, &s_c5n_18xlarge_platform_info); + s_add_platform_info_to_table(loader, &s_c5n_9xlarge_platform_info); + s_add_platform_info_to_table(loader, &s_c5n_metal_platform_info); + s_add_platform_info_to_table(loader, &s_p4d_platform_info); + s_add_platform_info_to_table(loader, &s_p4de_platform_info); + s_add_platform_info_to_table(loader, &s_p5_platform_info); + s_add_platform_info_to_table(loader, &s_trn1_n_platform_info); + s_add_platform_info_to_table(loader, &s_trn1_platform_info); + + return loader; +} + +struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_acquire(struct aws_s3_platform_info_loader *loader) { + aws_ref_count_acquire(&loader->ref_count); + return loader; +} + +struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_release(struct aws_s3_platform_info_loader *loader) { + if (loader) { + aws_ref_count_release(&loader->ref_count); + } + return NULL; +} + +struct imds_callback_info { + struct aws_allocator *allocator; + struct aws_string *instance_type; + struct aws_condition_variable c_var; + int error_code; + bool shutdown_completed; + struct aws_mutex mutex; +}; + +static void s_imds_client_shutdown_completed(void *user_data) { + struct imds_callback_info *info = user_data; + aws_mutex_lock(&info->mutex); + info->shutdown_completed = true; + 
aws_condition_variable_notify_all(&info->c_var); + aws_mutex_unlock(&info->mutex); +} + +static bool s_client_shutdown_predicate(void *arg) { + struct imds_callback_info *info = arg; + return info->shutdown_completed; +} + +static void s_imds_client_on_get_instance_info_callback( + const struct aws_imds_instance_info *instance_info, + int error_code, + void *user_data) { + struct imds_callback_info *info = user_data; + + aws_mutex_lock(&info->mutex); + if (error_code) { + info->error_code = error_code; + } else { + info->instance_type = aws_string_new_from_cursor(info->allocator, &instance_info->instance_type); + } + aws_condition_variable_notify_all(&info->c_var); + aws_mutex_unlock(&info->mutex); +} + +static bool s_completion_predicate(void *arg) { + struct imds_callback_info *info = arg; + return info->error_code != 0 || info->instance_type != NULL; +} + +struct aws_string *s_query_imds_for_instance_type(struct aws_allocator *allocator) { + + struct imds_callback_info callback_info = { + .mutex = AWS_MUTEX_INIT, + .c_var = AWS_CONDITION_VARIABLE_INIT, + .allocator = allocator, + }; + + struct aws_event_loop_group *el_group = NULL; + struct aws_host_resolver *resolver = NULL; + struct aws_client_bootstrap *client_bootstrap = NULL; + /* now call IMDS */ + el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + + if (!el_group) { + goto tear_down; + } + + struct aws_host_resolver_default_options resolver_options = { + .max_entries = 1, + .el_group = el_group, + }; + + resolver = aws_host_resolver_new_default(allocator, &resolver_options); + + if (!resolver) { + goto tear_down; + } + + struct aws_client_bootstrap_options bootstrap_options = { + .event_loop_group = el_group, + .host_resolver = resolver, + }; + + client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); + + if (!client_bootstrap) { + goto tear_down; + } + + struct aws_imds_client_shutdown_options imds_shutdown_options = { + .shutdown_callback = 
s_imds_client_shutdown_completed, + .shutdown_user_data = &callback_info, + }; + + struct aws_imds_client_options imds_options = { + .bootstrap = client_bootstrap, + .imds_version = IMDS_PROTOCOL_V2, + .shutdown_options = imds_shutdown_options, + }; + + struct aws_imds_client *imds_client = aws_imds_client_new(allocator, &imds_options); + + if (!imds_client) { + goto tear_down; + } + + aws_mutex_lock(&callback_info.mutex); + + if (aws_imds_client_get_instance_info(imds_client, s_imds_client_on_get_instance_info_callback, &callback_info)) { + aws_condition_variable_wait_for_pred( + &callback_info.c_var, &callback_info.mutex, AWS_TIMESTAMP_SECS, s_completion_predicate, &callback_info); + } + aws_imds_client_release(imds_client); + aws_condition_variable_wait_pred( + &callback_info.c_var, &callback_info.mutex, s_client_shutdown_predicate, &callback_info); + + aws_mutex_unlock(&callback_info.mutex); + + if (callback_info.error_code) { + aws_raise_error(callback_info.error_code); + AWS_LOGF_ERROR( + AWS_LS_S3_CLIENT, "IMDS call failed with error %s.", aws_error_debug_str(callback_info.error_code)); + } + +tear_down: + if (client_bootstrap) { + aws_client_bootstrap_release(client_bootstrap); + } + + if (resolver) { + aws_host_resolver_release(resolver); + } + + if (el_group) { + aws_event_loop_group_release(el_group); + } + return callback_info.instance_type; +} + +struct aws_byte_cursor aws_s3_get_ec2_instance_type(struct aws_s3_platform_info_loader *loader, bool cached_only) { + aws_mutex_lock(&loader->lock_data.lock); + struct aws_byte_cursor return_cur; + AWS_ZERO_STRUCT(return_cur); + + if (loader->lock_data.detected_instance_type) { + AWS_LOGF_TRACE( + AWS_LS_S3_CLIENT, + "id=%p: Instance type has already been determined to be %s. 
Returning cached version.", + (void *)loader, + aws_string_bytes(loader->lock_data.detected_instance_type)); + goto return_instance_and_unlock; + } + if (cached_only) { + AWS_LOGF_TRACE( + AWS_LS_S3_CLIENT, + "id=%p: Instance type has not been cached. Returning without trying to determine instance type since " + "cached_only is set.", + (void *)loader); + goto return_instance_and_unlock; + } + + AWS_LOGF_TRACE( + AWS_LS_S3_CLIENT, + "id=%p: Instance type has not been determined, checking to see if running in EC2 nitro environment.", + (void *)loader); + /* + * We want to only imds call if we know that we are on an ec2 instance. All new instances are Nitro and we don't + * care about the old ones. + */ + if (aws_s3_is_running_on_ec2_nitro(loader)) { + AWS_LOGF_INFO( + AWS_LS_S3_CLIENT, "id=%p: Detected Amazon EC2 with nitro as the current environment.", (void *)loader); + /* easy case not requiring any calls out to IMDS. If we detected we're running on ec2, then the dmi info is + * correct, and we can use it if we have it. Otherwise call out to IMDS. */ + struct aws_byte_cursor product_name = + aws_system_environment_get_virtualization_product_name(loader->current_env); + + if (product_name.len) { + loader->lock_data.detected_instance_type = aws_string_new_from_cursor(loader->allocator, &product_name); + loader->lock_data.current_env_platform_info.instance_type = + aws_byte_cursor_from_string(loader->lock_data.detected_instance_type); + s_add_platform_info_to_table(loader, &loader->lock_data.current_env_platform_info); + + AWS_LOGF_INFO( + AWS_LS_S3_CLIENT, + "id=%p: Determined instance type to be %s, from dmi info. Caching.", + (void *)loader, + aws_string_bytes(loader->lock_data.detected_instance_type)); + goto return_instance_and_unlock; + } + + AWS_LOGF_DEBUG( + AWS_LS_S3_CLIENT, + "static: DMI info was insufficient to determine instance type. 
Making call to IMDS to determine"); + struct aws_string *instance_type = s_query_imds_for_instance_type(loader->allocator); + if (instance_type) { + loader->lock_data.detected_instance_type = instance_type; + loader->lock_data.current_env_platform_info.instance_type = aws_byte_cursor_from_string(instance_type); + s_add_platform_info_to_table(loader, &loader->lock_data.current_env_platform_info); + AWS_LOGF_INFO( + AWS_LS_S3_CLIENT, + "id=%p: Determined instance type to be %s, from IMDS.", + (void *)loader, + aws_string_bytes(loader->lock_data.detected_instance_type)); + } + } + +return_instance_and_unlock: + return_cur = loader->lock_data.current_env_platform_info.instance_type; + aws_mutex_unlock(&loader->lock_data.lock); + + return return_cur; +} + +const struct aws_s3_platform_info *aws_s3_get_platform_info_for_current_environment( + struct aws_s3_platform_info_loader *loader) { + /* getting the instance type will set it on the loader the first time if it can */ + aws_s3_get_ec2_instance_type(loader, false /*cached_only*/); + /* will never be mutated after the above call. 
*/ + return &loader->lock_data.current_env_platform_info; +} + +struct aws_array_list aws_s3_get_recommended_platforms(struct aws_s3_platform_info_loader *loader) { + struct aws_array_list array_list; + aws_mutex_lock(&loader->lock_data.lock); + aws_array_list_init_dynamic(&array_list, loader->allocator, 5, sizeof(struct aws_byte_cursor)); + /* Iterate over the map and add instance types to the array list which have + * platform_info->has_recommended_configuration == true */ + for (struct aws_hash_iter iter = aws_hash_iter_begin(&loader->lock_data.compute_platform_info_table); + !aws_hash_iter_done(&iter); + aws_hash_iter_next(&iter)) { + struct aws_s3_platform_info *platform_info = iter.element.value; + + if (platform_info->has_recommended_configuration) { + aws_array_list_push_back(&array_list, &platform_info->instance_type); + } + } + aws_mutex_unlock(&loader->lock_data.lock); + return array_list; +} + +const struct aws_s3_platform_info *aws_s3_get_platform_info_for_instance_type( + struct aws_s3_platform_info_loader *loader, + struct aws_byte_cursor instance_type_name) { + aws_mutex_lock(&loader->lock_data.lock); + struct aws_hash_element *platform_info_element = NULL; + aws_hash_table_find(&loader->lock_data.compute_platform_info_table, &instance_type_name, &platform_info_element); + aws_mutex_unlock(&loader->lock_data.lock); + + if (platform_info_element) { + return platform_info_element->value; + } + + return NULL; +} + +bool aws_s3_is_running_on_ec2_nitro(struct aws_s3_platform_info_loader *loader) { + struct aws_byte_cursor system_virt_name = aws_system_environment_get_virtualization_vendor(loader->current_env); + + if (aws_byte_cursor_eq_c_str_ignore_case(&system_virt_name, "amazon ec2")) { + return true; + } + + return false; +} diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_request.c b/contrib/restricted/aws/aws-c-s3/source/s3_request.c index d92dfa955b3..aab3d952656 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_request.c +++ 
b/contrib/restricted/aws/aws-c-s3/source/s3_request.c @@ -1,15 +1,25 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + #include "aws/s3/private/s3_request.h" #include "aws/s3/private/s3_meta_request_impl.h" +#include "aws/s3/private/s3_util.h" #include <aws/auth/signable.h> +#include <aws/common/clock.h> #include <aws/io/stream.h> +#include <aws/s3/s3_client.h> static void s_s3_request_destroy(void *user_data); struct aws_s3_request *aws_s3_request_new( struct aws_s3_meta_request *meta_request, int request_tag, + enum aws_s3_request_type request_type, uint32_t part_number, uint32_t flags) { + AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->allocator); @@ -21,9 +31,17 @@ struct aws_s3_request *aws_s3_request_new( request->meta_request = aws_s3_meta_request_acquire(meta_request); request->request_tag = request_tag; + request->request_type = request_type; + + const char *operation_name = aws_s3_request_type_operation_name(request_type); + if (operation_name[0] != '\0') { + request->operation_name = aws_string_new_from_c_str(request->allocator, operation_name); + } + request->part_number = part_number; request->record_response_headers = (flags & AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS) != 0; - request->part_size_response_body = (flags & AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY) != 0; + request->has_part_size_response_body = (flags & AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY) != 0; + request->has_part_size_request_body = (flags & AWS_S3_REQUEST_FLAG_PART_SIZE_REQUEST_BODY) != 0; request->always_send = (flags & AWS_S3_REQUEST_FLAG_ALWAYS_SEND) != 0; return request; @@ -33,9 +51,31 @@ void aws_s3_request_setup_send_data(struct aws_s3_request *request, struct aws_h AWS_PRECONDITION(request); AWS_PRECONDITION(message); + if (request != NULL && request->send_data.metrics != NULL) { + /* If there is a metrics from previous attempt, complete it now. 
*/ + struct aws_s3_request_metrics *metric = request->send_data.metrics; + aws_high_res_clock_get_ticks((uint64_t *)&metric->time_metrics.end_timestamp_ns); + metric->time_metrics.total_duration_ns = + metric->time_metrics.end_timestamp_ns - metric->time_metrics.start_timestamp_ns; + + struct aws_s3_meta_request *meta_request = request->meta_request; + if (meta_request != NULL && meta_request->telemetry_callback != NULL) { + + aws_s3_meta_request_lock_synced_data(meta_request); + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_TELEMETRY}; + event.u.telemetry.metrics = aws_s3_request_metrics_acquire(metric); + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); + aws_s3_meta_request_unlock_synced_data(meta_request); + } + request->send_data.metrics = aws_s3_request_metrics_release(metric); + } aws_s3_request_clean_up_send_data(request); request->send_data.message = message; + request->send_data.metrics = aws_s3_request_metrics_new(request->allocator, request, message); + /* Start the timestamp */ + aws_high_res_clock_get_ticks((uint64_t *)&request->send_data.metrics->time_metrics.start_timestamp_ns); + aws_http_message_acquire(message); } @@ -54,12 +94,13 @@ static void s_s3_request_clean_up_send_data_message(struct aws_s3_request *reque void aws_s3_request_clean_up_send_data(struct aws_s3_request *request) { AWS_PRECONDITION(request); + /* The metrics should be collected and provided to user before reaching here */ + AWS_FATAL_ASSERT(request->send_data.metrics == NULL); s_s3_request_clean_up_send_data_message(request); aws_signable_destroy(request->send_data.signable); request->send_data.signable = NULL; - aws_http_headers_release(request->send_data.response_headers); request->send_data.response_headers = NULL; @@ -68,18 +109,18 @@ void aws_s3_request_clean_up_send_data(struct aws_s3_request *request) { AWS_ZERO_STRUCT(request->send_data); } -void aws_s3_request_acquire(struct aws_s3_request *request) { - 
AWS_PRECONDITION(request); - - aws_ref_count_acquire(&request->ref_count); +struct aws_s3_request *aws_s3_request_acquire(struct aws_s3_request *request) { + if (request != NULL) { + aws_ref_count_acquire(&request->ref_count); + } + return request; } -void aws_s3_request_release(struct aws_s3_request *request) { - if (request == NULL) { - return; +struct aws_s3_request *aws_s3_request_release(struct aws_s3_request *request) { + if (request != NULL) { + aws_ref_count_release(&request->ref_count); } - - aws_ref_count_release(&request->ref_count); + return NULL; } static void s_s3_request_destroy(void *user_data) { @@ -91,7 +132,301 @@ static void s_s3_request_destroy(void *user_data) { aws_s3_request_clean_up_send_data(request); aws_byte_buf_clean_up(&request->request_body); + aws_s3_buffer_pool_release_ticket(request->meta_request->client->buffer_pool, request->ticket); + aws_string_destroy(request->operation_name); aws_s3_meta_request_release(request->meta_request); aws_mem_release(request->allocator, request); } + +static void s_s3_request_metrics_destroy(void *arg) { + struct aws_s3_request_metrics *metrics = arg; + if (metrics == NULL) { + return; + } + aws_http_headers_release(metrics->req_resp_info_metrics.response_headers); + aws_string_destroy(metrics->req_resp_info_metrics.request_path_query); + aws_string_destroy(metrics->req_resp_info_metrics.host_address); + aws_string_destroy(metrics->req_resp_info_metrics.request_id); + aws_string_destroy(metrics->req_resp_info_metrics.operation_name); + aws_string_destroy(metrics->crt_info_metrics.ip_address); + + aws_mem_release(metrics->allocator, metrics); +} + +struct aws_s3_request_metrics *aws_s3_request_metrics_new( + struct aws_allocator *allocator, + const struct aws_s3_request *request, + const struct aws_http_message *message) { + + struct aws_s3_request_metrics *metrics = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_request_metrics)); + metrics->allocator = allocator; + struct aws_byte_cursor 
out_path; + AWS_ZERO_STRUCT(out_path); + int err = aws_http_message_get_request_path(message, &out_path); + /* If there is no path of the message, it should be a program error. */ + AWS_ASSERT(!err); + metrics->req_resp_info_metrics.request_path_query = aws_string_new_from_cursor(allocator, &out_path); + AWS_ASSERT(metrics->req_resp_info_metrics.request_path_query != NULL); + + /* Get the host header value */ + struct aws_byte_cursor host_header_value; + AWS_ZERO_STRUCT(host_header_value); + struct aws_http_headers *message_headers = aws_http_message_get_headers(message); + AWS_ASSERT(message_headers); + err = aws_http_headers_get(message_headers, g_host_header_name, &host_header_value); + AWS_ASSERT(!err); + metrics->req_resp_info_metrics.host_address = aws_string_new_from_cursor(allocator, &host_header_value); + AWS_ASSERT(metrics->req_resp_info_metrics.host_address != NULL); + + metrics->req_resp_info_metrics.request_type = request->request_type; + + if (request->operation_name != NULL) { + metrics->req_resp_info_metrics.operation_name = aws_string_new_from_string(allocator, request->operation_name); + } + + metrics->time_metrics.start_timestamp_ns = -1; + metrics->time_metrics.end_timestamp_ns = -1; + metrics->time_metrics.total_duration_ns = -1; + metrics->time_metrics.send_start_timestamp_ns = -1; + metrics->time_metrics.send_end_timestamp_ns = -1; + metrics->time_metrics.sending_duration_ns = -1; + metrics->time_metrics.receive_start_timestamp_ns = -1; + metrics->time_metrics.receive_end_timestamp_ns = -1; + metrics->time_metrics.receiving_duration_ns = -1; + metrics->time_metrics.sign_start_timestamp_ns = -1; + metrics->time_metrics.sign_end_timestamp_ns = -1; + metrics->time_metrics.signing_duration_ns = -1; + + metrics->req_resp_info_metrics.response_status = -1; + + (void)err; + aws_ref_count_init(&metrics->ref_count, metrics, s_s3_request_metrics_destroy); + + return metrics; +} +struct aws_s3_request_metrics *aws_s3_request_metrics_acquire(struct 
aws_s3_request_metrics *metrics) { + if (!metrics) { + return NULL; + } + + aws_ref_count_acquire(&metrics->ref_count); + return metrics; +} +struct aws_s3_request_metrics *aws_s3_request_metrics_release(struct aws_s3_request_metrics *metrics) { + if (metrics != NULL) { + aws_ref_count_release(&metrics->ref_count); + } + return NULL; +} + +int aws_s3_request_metrics_get_request_id( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **out_request_id) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(out_request_id); + if (metrics->req_resp_info_metrics.request_id == NULL) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *out_request_id = metrics->req_resp_info_metrics.request_id; + return AWS_OP_SUCCESS; +} + +void aws_s3_request_metrics_get_start_timestamp_ns(const struct aws_s3_request_metrics *metrics, uint64_t *start_time) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(start_time); + *start_time = metrics->time_metrics.start_timestamp_ns; +} + +void aws_s3_request_metrics_get_end_timestamp_ns(const struct aws_s3_request_metrics *metrics, uint64_t *end_time) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(end_time); + *end_time = metrics->time_metrics.end_timestamp_ns; +} + +void aws_s3_request_metrics_get_total_duration_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *total_duration) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(total_duration); + *total_duration = metrics->time_metrics.total_duration_ns; +} + +int aws_s3_request_metrics_get_send_start_timestamp_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *send_start_time) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(send_start_time); + if (metrics->time_metrics.send_start_timestamp_ns < 0) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *send_start_time = metrics->time_metrics.send_start_timestamp_ns; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_send_end_timestamp_ns( + 
const struct aws_s3_request_metrics *metrics, + uint64_t *send_end_time) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(send_end_time); + if (metrics->time_metrics.send_end_timestamp_ns < 0) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *send_end_time = metrics->time_metrics.send_end_timestamp_ns; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_sending_duration_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *sending_duration) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(sending_duration); + if (metrics->time_metrics.sending_duration_ns < 0) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *sending_duration = metrics->time_metrics.sending_duration_ns; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_receive_start_timestamp_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *receive_start_time) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(receive_start_time); + if (metrics->time_metrics.receive_start_timestamp_ns < 0) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *receive_start_time = metrics->time_metrics.receive_start_timestamp_ns; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_receive_end_timestamp_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *receive_end_time) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(receive_end_time); + if (metrics->time_metrics.receive_end_timestamp_ns < 0) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *receive_end_time = metrics->time_metrics.receive_end_timestamp_ns; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_receiving_duration_ns( + const struct aws_s3_request_metrics *metrics, + uint64_t *receiving_duration) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(receiving_duration); + if (metrics->time_metrics.receiving_duration_ns < 0) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + 
*receiving_duration = metrics->time_metrics.receiving_duration_ns; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_response_status_code( + const struct aws_s3_request_metrics *metrics, + int *response_status) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(response_status); + if (metrics->req_resp_info_metrics.response_status == -1) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *response_status = metrics->req_resp_info_metrics.response_status; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_response_headers( + const struct aws_s3_request_metrics *metrics, + struct aws_http_headers **response_headers) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(response_headers); + if (metrics->req_resp_info_metrics.response_headers == NULL) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *response_headers = metrics->req_resp_info_metrics.response_headers; + return AWS_OP_SUCCESS; +} + +void aws_s3_request_metrics_get_request_path_query( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **request_path_query) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(request_path_query); + *request_path_query = metrics->req_resp_info_metrics.request_path_query; +} + +void aws_s3_request_metrics_get_host_address( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **host_address) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(host_address); + *host_address = metrics->req_resp_info_metrics.host_address; +} + +int aws_s3_request_metrics_get_ip_address( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **ip_address) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(ip_address); + if (metrics->crt_info_metrics.ip_address == NULL) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *ip_address = metrics->crt_info_metrics.ip_address; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_connection_id(const 
struct aws_s3_request_metrics *metrics, size_t *connection_id) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(connection_id); + if (metrics->crt_info_metrics.connection_id == NULL) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *connection_id = (size_t)metrics->crt_info_metrics.connection_id; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_thread_id(const struct aws_s3_request_metrics *metrics, aws_thread_id_t *thread_id) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(thread_id); + if (metrics->crt_info_metrics.thread_id == 0) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *thread_id = metrics->crt_info_metrics.thread_id; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_request_stream_id(const struct aws_s3_request_metrics *metrics, uint32_t *stream_id) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(stream_id); + if (metrics->crt_info_metrics.stream_id == 0) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *stream_id = metrics->crt_info_metrics.stream_id; + return AWS_OP_SUCCESS; +} + +int aws_s3_request_metrics_get_operation_name( + const struct aws_s3_request_metrics *metrics, + const struct aws_string **out_operation_name) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(out_operation_name); + if (metrics->req_resp_info_metrics.operation_name == NULL) { + return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); + } + *out_operation_name = metrics->req_resp_info_metrics.operation_name; + return AWS_OP_SUCCESS; +} + +void aws_s3_request_metrics_get_request_type( + const struct aws_s3_request_metrics *metrics, + enum aws_s3_request_type *out_request_type) { + AWS_PRECONDITION(metrics); + AWS_PRECONDITION(out_request_type); + *out_request_type = metrics->req_resp_info_metrics.request_type; +} + +int aws_s3_request_metrics_get_error_code(const struct aws_s3_request_metrics *metrics) { + AWS_PRECONDITION(metrics); + return 
metrics->crt_info_metrics.error_code; +} diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c b/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c index 93aa00a08d8..57ad2cc813b 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c @@ -4,8 +4,6 @@ */ #include "aws/s3/private/s3_request_messages.h" -#include "aws/s3/private/s3_checksums.h" -#include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include <aws/cal/hash.h> @@ -13,9 +11,8 @@ #include <aws/common/encoding.h> #include <aws/common/string.h> #include <aws/http/request_response.h> +#include <aws/io/async_stream.h> #include <aws/io/stream.h> -#include <aws/io/uri.h> -#include <aws/s3/s3.h> #include <inttypes.h> const struct aws_byte_cursor g_s3_create_multipart_upload_excluded_headers[] = { @@ -118,6 +115,7 @@ const struct aws_byte_cursor g_s3_complete_multipart_upload_with_checksum_exclud AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"), + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-sdk-checksum-algorithm"), }; const struct aws_byte_cursor g_s3_list_parts_excluded_headers[] = { @@ -340,7 +338,7 @@ struct aws_http_message *aws_s3_upload_part_message_new( if (should_compute_content_md5) { if (!checksum_config || checksum_config->location == AWS_SCL_NONE) { - /* MD5 will be skiped if flexible checksum used */ + /* MD5 will be skipped if flexible checksum used */ if (aws_s3_message_util_add_content_md5_header(allocator, buffer, message)) { goto error_clean_up; } @@ -420,111 +418,112 @@ error_clean_up: return NULL; } -/* Creates a HEAD GetObject request to get the size of the specified object. 
*/ -struct aws_http_message *aws_s3_get_object_size_message_new( +static const struct aws_byte_cursor s_slash_char = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); +/** + * For the CopyObject operation, create the initial HEAD message to retrieve the size of the copy source. + */ +struct aws_http_message *aws_s3_get_source_object_size_message_new( struct aws_allocator *allocator, - struct aws_http_message *base_message, - struct aws_byte_cursor source_bucket, - struct aws_byte_cursor source_key) { - - (void)base_message; + struct aws_http_message *base_message) { + struct aws_http_message *message = NULL; + struct aws_byte_buf head_object_host_header; + AWS_ZERO_STRUCT(head_object_host_header); AWS_PRECONDITION(allocator); - struct aws_http_message *message = aws_http_message_new_request(allocator); - - if (message == NULL) { + /* Find the x-amz-copy-source header, to extract source bucket/key information. */ + struct aws_http_headers *headers = aws_http_message_get_headers(base_message); + if (!headers) { + AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest is missing headers"); return NULL; } - const struct aws_byte_cursor head_operation = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD"); - if (aws_http_message_set_request_method(message, head_operation)) { - goto error_clean_up; + struct aws_byte_cursor source_header; + const struct aws_byte_cursor copy_source_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"); + if (aws_http_headers_get(headers, copy_source_header, &source_header) != AWS_OP_SUCCESS) { + AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest is missing the x-amz-copy-source header"); + return NULL; + } + struct aws_byte_cursor host; + if (aws_http_headers_get(headers, g_host_header_name, &host) != AWS_OP_SUCCESS) { + AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest is missing the Host header"); + return NULL; } - char destination_path[1024]; - snprintf(destination_path, sizeof(destination_path), "/%.*s", (int)source_key.len, source_key.ptr); - /* 
TODO: url encode */ + struct aws_byte_cursor request_path = source_header; - if (aws_http_message_set_request_path(message, aws_byte_cursor_from_c_str(destination_path))) { - goto error_clean_up; + /* Skip optional leading slash. */ + if (aws_byte_cursor_starts_with(&request_path, &s_slash_char)) { + aws_byte_cursor_advance(&request_path, 1); } - char host_header_value[1024]; - /* TODO: Fix the hard-coded host name. */ - snprintf( - host_header_value, - sizeof(host_header_value), - "%.*s.s3.us-west-2.amazonaws.com", - (int)source_bucket.len, - source_bucket.ptr); - struct aws_http_header host_header = { - .name = g_host_header_name, - .value = aws_byte_cursor_from_c_str(host_header_value), - }; - aws_http_message_add_header(message, host_header); - - aws_http_message_set_body_stream(message, NULL); + /* From this point forward, the format is {bucket}/{key} - split + components.*/ - return message; + struct aws_byte_cursor source_bucket = {0}; -error_clean_up: - - if (message != NULL) { - aws_http_message_release(message); - message = NULL; + if (aws_byte_cursor_next_split(&request_path, '/', &source_bucket)) { + aws_byte_cursor_advance(&request_path, source_bucket.len); } - return NULL; -} - -/* Creates a HEAD GetObject sub-request to get the size of the source object of a Copy meta request. 
*/ -struct aws_http_message *aws_s3_get_source_object_size_message_new( - struct aws_allocator *allocator, - struct aws_http_message *base_message) { - AWS_PRECONDITION(allocator); + if (source_bucket.len == 0 || request_path.len == 0) { + AWS_LOGF_ERROR( + AWS_LS_S3_GENERAL, + "CopyRequest x-amz-copy-source header does not follow expected bucket/key format: " PRInSTR, + AWS_BYTE_CURSOR_PRI(source_header)); + goto error_cleanup; + } - struct aws_http_message *message = NULL; + if (aws_byte_buf_init_copy_from_cursor(&head_object_host_header, allocator, source_bucket)) { + goto error_cleanup; + } - /* find the x-amz-copy-source header */ - struct aws_http_headers *headers = aws_http_message_get_headers(base_message); + /* Reuse the domain name from the original Host header for the HEAD request. + * TODO: following code works by replacing bucket name in the host with the + * source bucket name. this only works for virtual host endpoints and has a + * slew of other issues, like not supporting source in a different region. + * This covers common case, but we need to rethink how we can support all + * cases in general. 
+ */ + struct aws_byte_cursor domain_name; + const struct aws_byte_cursor dot = aws_byte_cursor_from_c_str("."); + if (aws_byte_cursor_find_exact(&host, &dot, &domain_name)) { + AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest Host header not in FQDN format"); + goto error_cleanup; + } - struct aws_byte_cursor source_bucket; - AWS_ZERO_STRUCT(source_bucket); + if (aws_byte_buf_append_dynamic(&head_object_host_header, &domain_name)) { + goto error_cleanup; + } - const struct aws_byte_cursor copy_source_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"); - if (aws_http_headers_get(headers, copy_source_header, &source_bucket) != AWS_OP_SUCCESS) { - AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest is missing the x-amz-copy-source header"); - return NULL; + message = aws_http_message_new_request(allocator); + if (message == NULL) { + goto error_cleanup; } - if (source_bucket.len > 1 && source_bucket.ptr[0] == '/') { - /* skip the leading slash */ - aws_byte_cursor_advance(&source_bucket, 1); + if (aws_http_message_set_request_method(message, g_head_method)) { + goto error_cleanup; } - /* as we skipped the optional leading slash, from this point source format is always {bucket}/{key}. split them. 
- */ - struct aws_byte_cursor source_key = source_bucket; - while (source_key.len > 0) { - if (*source_key.ptr == '/') { - source_bucket.len = source_key.ptr - source_bucket.ptr; - aws_byte_cursor_advance(&source_key, 1); /* skip the / between bucket and key */ - break; - } - aws_byte_cursor_advance(&source_key, 1); + struct aws_http_header host_header = { + .name = g_host_header_name, + .value = aws_byte_cursor_from_buf(&head_object_host_header), + }; + if (aws_http_message_add_header(message, host_header)) { + goto error_cleanup; } - if (source_bucket.len == 0 || source_key.len == 0) { - AWS_LOGF_ERROR( - AWS_LS_S3_GENERAL, - "The CopyRequest x-amz-copy-source header must contain the bucket and object key separated by a slash"); + if (aws_http_message_set_request_path(message, request_path)) { goto error_cleanup; } - message = aws_s3_get_object_size_message_new(allocator, base_message, source_bucket, source_key); -error_cleanup: + aws_byte_buf_clean_up(&head_object_host_header); return message; + +error_cleanup: + aws_byte_buf_clean_up(&head_object_host_header); + aws_http_message_release(message); + return NULL; } static const struct aws_byte_cursor s_complete_payload_begin = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( @@ -553,14 +552,13 @@ struct aws_http_message *aws_s3_complete_multipart_message_new( struct aws_http_message *base_message, struct aws_byte_buf *body_buffer, const struct aws_string *upload_id, - const struct aws_array_list *etags, - struct aws_byte_buf *checksums, + const struct aws_array_list *parts, enum aws_s3_checksum_algorithm algorithm) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(base_message); AWS_PRECONDITION(body_buffer); AWS_PRECONDITION(upload_id); - AWS_PRECONDITION(etags); + AWS_PRECONDITION(parts); const struct aws_byte_cursor *mpu_algorithm_checksum_name = aws_get_complete_mpu_name_from_algorithm(algorithm); @@ -601,7 +599,7 @@ struct aws_http_message *aws_s3_complete_multipart_message_new( goto error_clean_up; } - /* Create XML 
payload with all of the etags of finished parts */ + /* Create XML payload with all the etags of finished parts */ { aws_byte_buf_reset(body_buffer, false); @@ -609,18 +607,17 @@ struct aws_http_message *aws_s3_complete_multipart_message_new( goto error_clean_up; } - for (size_t etag_index = 0; etag_index < aws_array_list_length(etags); ++etag_index) { - struct aws_string *etag = NULL; - - aws_array_list_get_at(etags, &etag, etag_index); + for (size_t part_index = 0; part_index < aws_array_list_length(parts); ++part_index) { + struct aws_s3_mpu_part_info *part = NULL; - AWS_FATAL_ASSERT(etag != NULL); + aws_array_list_get_at(parts, &part, part_index); + AWS_FATAL_ASSERT(part != NULL); if (aws_byte_buf_append_dynamic(body_buffer, &s_part_section_string_0)) { goto error_clean_up; } - struct aws_byte_cursor etag_byte_cursor = aws_byte_cursor_from_string(etag); + struct aws_byte_cursor etag_byte_cursor = aws_byte_cursor_from_string(part->etag); if (aws_byte_buf_append_dynamic(body_buffer, &etag_byte_cursor)) { goto error_clean_up; @@ -631,7 +628,7 @@ struct aws_http_message *aws_s3_complete_multipart_message_new( } char part_number_buffer[32] = ""; - int part_number = (int)(etag_index + 1); + int part_number = (int)(part_index + 1); int part_number_num_char = snprintf(part_number_buffer, sizeof(part_number_buffer), "%d", part_number); struct aws_byte_cursor part_number_byte_cursor = aws_byte_cursor_from_array(part_number_buffer, part_number_num_char); @@ -643,8 +640,9 @@ struct aws_http_message *aws_s3_complete_multipart_message_new( if (aws_byte_buf_append_dynamic(body_buffer, &s_close_part_number_tag)) { goto error_clean_up; } + if (mpu_algorithm_checksum_name) { - struct aws_byte_cursor checksum = aws_byte_cursor_from_buf(&checksums[etag_index]); + struct aws_byte_cursor checksum = aws_byte_cursor_from_buf(&part->checksum_base64); if (aws_byte_buf_append_dynamic(body_buffer, &s_open_start_bracket)) { goto error_clean_up; @@ -745,6 +743,8 @@ struct aws_input_stream 
*aws_s3_message_util_assign_body( } struct aws_input_stream *input_stream = aws_input_stream_new_from_cursor(allocator, &buffer_byte_cursor); + struct aws_byte_buf content_encoding_header_buf; + AWS_ZERO_STRUCT(content_encoding_header_buf); if (input_stream == NULL) { goto error_clean_up; @@ -754,11 +754,31 @@ struct aws_input_stream *aws_s3_message_util_assign_body( if (checksum_config->location == AWS_SCL_TRAILER) { /* aws-chunked encode the payload and add related headers */ - /* set Content-Encoding header. TODO: the aws-chunked should be appended to the existing content encoding. + /* set Content-Encoding header. If the header already exists, append the existing value to aws-chunked + * We already made sure that the existing value is not 'aws_chunked' in 'aws_s3_client_make_meta_request' + * function. */ - if (aws_http_headers_set(headers, g_content_encoding_header_name, g_content_encoding_header_aws_chunked)) { + struct aws_byte_cursor content_encoding_header_cursor; + bool has_content_encoding_header = + aws_http_headers_get(headers, g_content_encoding_header_name, &content_encoding_header_cursor) == + AWS_OP_SUCCESS; + size_t content_encoding_header_buf_size = + has_content_encoding_header + ?
g_content_encoding_header_aws_chunked.len + content_encoding_header_cursor.len + 1 + : g_content_encoding_header_aws_chunked.len; + aws_byte_buf_init(&content_encoding_header_buf, allocator, content_encoding_header_buf_size); + + if (has_content_encoding_header) { + aws_byte_buf_append_dynamic(&content_encoding_header_buf, &content_encoding_header_cursor); + aws_byte_buf_append_byte_dynamic(&content_encoding_header_buf, ','); + } + aws_byte_buf_append_dynamic(&content_encoding_header_buf, &g_content_encoding_header_aws_chunked); + + if (aws_http_headers_set( + headers, g_content_encoding_header_name, aws_byte_cursor_from_buf(&content_encoding_header_buf))) { goto error_clean_up; } + /* set x-amz-trailer header */ if (aws_http_headers_set( headers, @@ -803,12 +823,13 @@ struct aws_input_stream *aws_s3_message_util_assign_body( aws_http_message_set_body_stream(out_message, input_stream); /* Let the message take the full ownership */ aws_input_stream_release(input_stream); - + aws_byte_buf_clean_up(&content_encoding_header_buf); return input_stream; error_clean_up: AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Failed to assign body for s3 request http message, from body buffer ."); aws_input_stream_release(input_stream); + aws_byte_buf_clean_up(&content_encoding_header_buf); return NULL; } @@ -918,49 +939,6 @@ error_clean_up: return NULL; } -/* Copy message and retain all headers, but replace body with one that reads directly from a filepath. 
*/ -struct aws_http_message *aws_s3_message_util_copy_http_message_filepath_body_all_headers( - struct aws_allocator *allocator, - struct aws_http_message *base_message, - struct aws_byte_cursor filepath) { - - bool success = false; - struct aws_string *filepath_str = NULL; - struct aws_input_stream *body_stream = NULL; - struct aws_http_message *message = NULL; - - /* Copy message and retain all headers */ - message = aws_s3_message_util_copy_http_message_no_body_filter_headers( - allocator, - base_message, - NULL /*excluded_header_array*/, - 0 /*excluded_header_array_size*/, - false /*exclude_x_amz_meta*/); - if (!message) { - goto clean_up; - } - - /* Create body-stream that reads from file */ - filepath_str = aws_string_new_from_cursor(allocator, &filepath); - body_stream = aws_input_stream_new_from_file(allocator, aws_string_c_str(filepath_str)); - if (!body_stream) { - goto clean_up; - } - aws_http_message_set_body_stream(message, body_stream); - - success = true; - -clean_up: - aws_string_destroy(filepath_str); - aws_input_stream_release(body_stream); - if (success) { - return message; - } else { - aws_http_message_release(message); - return NULL; - } -} - void aws_s3_message_util_copy_headers( struct aws_http_message *source_message, struct aws_http_message *dest_message, diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_util.c b/contrib/restricted/aws/aws-c-s3/source/s3_util.c index 87b840fc372..ac21402a25d 100644 --- a/contrib/restricted/aws/aws-c-s3/source/s3_util.c +++ b/contrib/restricted/aws/aws-c-s3/source/s3_util.c @@ -5,11 +5,14 @@ #include "aws/s3/private/s3_util.h" #include "aws/s3/private/s3_client_impl.h" +#include "aws/s3/private/s3_meta_request_impl.h" +#include "aws/s3/private/s3_platform_info.h" +#include "aws/s3/private/s3_request.h" #include <aws/auth/credentials.h> +#include <aws/common/clock.h> #include <aws/common/string.h> #include <aws/common/xml_parser.h> #include <aws/http/request_response.h> -#include <aws/s3/s3.h> #include 
<aws/s3/s3_client.h> #include <inttypes.h> @@ -20,9 +23,11 @@ const struct aws_byte_cursor g_s3_client_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(AWS_S3_CLIENT_VERSION); const struct aws_byte_cursor g_s3_service_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3"); +const struct aws_byte_cursor g_s3express_service_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3express"); const struct aws_byte_cursor g_host_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"); const struct aws_byte_cursor g_range_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Range"); const struct aws_byte_cursor g_if_match_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-Match"); +const struct aws_byte_cursor g_request_id_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-request-id"); const struct aws_byte_cursor g_etag_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ETag"); const struct aws_byte_cursor g_content_range_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Range"); const struct aws_byte_cursor g_content_type_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"); @@ -53,6 +58,8 @@ const struct aws_byte_cursor g_sha1_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_S const struct aws_byte_cursor g_sha256_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumSHA256"); const struct aws_byte_cursor g_accept_ranges_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("accept-ranges"); const struct aws_byte_cursor g_acl_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"); +const struct aws_byte_cursor g_mp_parts_count_header_name = + AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-mp-parts-count"); const struct aws_byte_cursor g_post_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST"); const struct aws_byte_cursor g_head_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD"); const struct aws_byte_cursor g_delete_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DELETE"); @@ -60,18 +67,39 @@ const struct 
aws_byte_cursor g_delete_method = AWS_BYTE_CUR_INIT_FROM_STRING_LIT const struct aws_byte_cursor g_user_agent_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("User-Agent"); const struct aws_byte_cursor g_user_agent_header_product_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CRTS3NativeClient"); - -const struct aws_byte_cursor g_error_body_xml_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Error"); -const struct aws_byte_cursor g_code_body_xml_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Code"); - -const struct aws_byte_cursor g_s3_internal_error_code = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("InternalError"); -const struct aws_byte_cursor g_s3_slow_down_error_code = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SlowDown"); -/* The special error code as Asynchronous Error Codes */ -const struct aws_byte_cursor g_s3_internal_errors_code = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("InternalErrors"); +const struct aws_byte_cursor g_user_agent_header_platform = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("platform"); +const struct aws_byte_cursor g_user_agent_header_unknown = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("unknown"); const uint32_t g_s3_max_num_upload_parts = 10000; const size_t g_s3_min_upload_part_size = MB_TO_BYTES(5); +const char *aws_s3_request_type_operation_name(enum aws_s3_request_type type) { + switch (type) { + case AWS_S3_REQUEST_TYPE_HEAD_OBJECT: + return "HeadObject"; + case AWS_S3_REQUEST_TYPE_GET_OBJECT: + return "GetObject"; + case AWS_S3_REQUEST_TYPE_LIST_PARTS: + return "ListParts"; + case AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD: + return "CreateMultipartUpload"; + case AWS_S3_REQUEST_TYPE_UPLOAD_PART: + return "UploadPart"; + case AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD: + return "AbortMultipartUpload"; + case AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD: + return "CompleteMultipartUpload"; + case AWS_S3_REQUEST_TYPE_UPLOAD_PART_COPY: + return "UploadPartCopy"; + case AWS_S3_REQUEST_TYPE_COPY_OBJECT: + return "CopyObject"; + case 
AWS_S3_REQUEST_TYPE_PUT_OBJECT: + return "PutObject"; + default: + return ""; + } +} + void copy_http_headers(const struct aws_http_headers *src, struct aws_http_headers *dest) { AWS_PRECONDITION(src); AWS_PRECONDITION(dest); @@ -85,161 +113,138 @@ void copy_http_headers(const struct aws_http_headers *src, struct aws_http_heade aws_http_headers_set(dest, header.name, header.value); } } - -struct top_level_xml_tag_value_with_root_value_user_data { +/* user_data for XML traversal */ +struct xml_get_body_at_path_traversal { struct aws_allocator *allocator; - const struct aws_byte_cursor *tag_name; - const struct aws_byte_cursor *expected_root_name; - bool *root_name_mismatch; - struct aws_string *result; + const char **path_name_array; + size_t path_name_count; + size_t path_name_i; + struct aws_byte_cursor *out_body; + bool found_node; }; -static bool s_top_level_xml_tag_value_child_xml_node( - struct aws_xml_parser *parser, - struct aws_xml_node *node, - void *user_data) { +static int s_xml_get_body_at_path_on_node(struct aws_xml_node *node, void *user_data) { + struct xml_get_body_at_path_traversal *traversal = user_data; - struct aws_byte_cursor node_name; - - /* If we can't get the name of the node, stop traversing. */ - if (aws_xml_node_get_name(node, &node_name)) { - return false; + /* if we already found what we're looking for, just finish parsing */ + if (traversal->found_node) { + return AWS_OP_SUCCESS; } - struct top_level_xml_tag_value_with_root_value_user_data *xml_user_data = user_data; - - /* If the name of the node is what we are looking for, store the body of the node in our result, and stop - * traversing. 
*/ - if (aws_byte_cursor_eq(&node_name, xml_user_data->tag_name)) { - - struct aws_byte_cursor node_body; - aws_xml_node_as_body(parser, node, &node_body); - - xml_user_data->result = aws_string_new_from_cursor(xml_user_data->allocator, &node_body); - - return false; - } - - /* If we made it here, the tag hasn't been found yet, so return true to keep looking. */ - return true; -} - -static bool s_top_level_xml_tag_value_root_xml_node( - struct aws_xml_parser *parser, - struct aws_xml_node *node, - void *user_data) { - struct top_level_xml_tag_value_with_root_value_user_data *xml_user_data = user_data; - if (xml_user_data->expected_root_name) { - /* If we can't get the name of the node, stop traversing. */ - struct aws_byte_cursor node_name; - if (aws_xml_node_get_name(node, &node_name)) { - return false; - } - if (!aws_byte_cursor_eq(&node_name, xml_user_data->expected_root_name)) { - /* Not match the expected root name, stop parsing. */ - *xml_user_data->root_name_mismatch = true; - return false; + /* check if this node is on the path */ + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + const char *expected_name = traversal->path_name_array[traversal->path_name_i]; + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, expected_name)) { + + bool is_final_node_on_path = traversal->path_name_i + 1 == traversal->path_name_count; + if (is_final_node_on_path) { + /* retrieve the body */ + if (aws_xml_node_as_body(node, traversal->out_body) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; + } + traversal->found_node = true; + return AWS_OP_SUCCESS; + } else { + /* node is on path, but it's not the final node, so traverse its children */ + traversal->path_name_i++; + if (aws_xml_node_traverse(node, s_xml_get_body_at_path_on_node, traversal) != AWS_OP_SUCCESS) { + return AWS_OP_ERR; + } + traversal->path_name_i--; + return AWS_OP_SUCCESS; } + } else { + /* this node is not on the path, continue parsing siblings */ + return AWS_OP_SUCCESS; } - - /* Traverse the 
root node, and then return false to stop. */ - aws_xml_node_traverse(parser, node, s_top_level_xml_tag_value_child_xml_node, user_data); - return false; } -struct aws_string *aws_xml_get_top_level_tag_with_root_name( +int aws_xml_get_body_at_path( struct aws_allocator *allocator, - const struct aws_byte_cursor *tag_name, - const struct aws_byte_cursor *expected_root_name, - bool *out_root_name_mismatch, - struct aws_byte_cursor *xml_body) { - AWS_PRECONDITION(allocator); - AWS_PRECONDITION(tag_name); - AWS_PRECONDITION(xml_body); - - struct aws_xml_parser_options parser_options = {.doc = *xml_body}; - struct aws_xml_parser *parser = aws_xml_parser_new(allocator, &parser_options); - bool root_name_mismatch = false; - - struct top_level_xml_tag_value_with_root_value_user_data xml_user_data = { - allocator, - tag_name, - expected_root_name, - &root_name_mismatch, - NULL, + struct aws_byte_cursor xml_doc, + const char **path_name_array, + struct aws_byte_cursor *out_body) { + + struct xml_get_body_at_path_traversal traversal = { + .allocator = allocator, + .path_name_array = path_name_array, + .path_name_count = 0, + .out_body = out_body, }; - if (aws_xml_parser_parse(parser, s_top_level_xml_tag_value_root_xml_node, (void *)&xml_user_data)) { - aws_string_destroy(xml_user_data.result); - xml_user_data.result = NULL; - goto clean_up; - } - if (out_root_name_mismatch) { - *out_root_name_mismatch = root_name_mismatch; + /* find path_name_count */ + while (path_name_array[traversal.path_name_count] != NULL) { + traversal.path_name_count++; + AWS_ASSERT(traversal.path_name_count < 4); /* sanity check, increase cap if necessary */ } + AWS_ASSERT(traversal.path_name_count > 0); -clean_up: - - aws_xml_parser_destroy(parser); + /* parse XML */ + struct aws_xml_parser_options parse_options = { + .doc = xml_doc, + .on_root_encountered = s_xml_get_body_at_path_on_node, + .user_data = &traversal, + }; + if (aws_xml_parse(allocator, &parse_options)) { + goto error; + } - return 
xml_user_data.result; -} + if (!traversal.found_node) { + aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); + goto error; + } -struct aws_string *aws_xml_get_top_level_tag( - struct aws_allocator *allocator, - const struct aws_byte_cursor *tag_name, - struct aws_byte_cursor *xml_body) { - return aws_xml_get_top_level_tag_with_root_name(allocator, tag_name, NULL, NULL, xml_body); + return AWS_OP_SUCCESS; +error: + AWS_ZERO_STRUCT(*out_body); + return AWS_OP_ERR; } struct aws_cached_signing_config_aws *aws_cached_signing_config_new( - struct aws_allocator *allocator, + struct aws_s3_client *client, const struct aws_signing_config_aws *signing_config) { - AWS_PRECONDITION(allocator); + AWS_PRECONDITION(client); AWS_PRECONDITION(signing_config); + struct aws_allocator *allocator = client->allocator; + struct aws_cached_signing_config_aws *cached_signing_config = aws_mem_calloc(allocator, 1, sizeof(struct aws_cached_signing_config_aws)); cached_signing_config->allocator = allocator; - cached_signing_config->config.config_type = signing_config->config_type; - cached_signing_config->config.algorithm = signing_config->algorithm; - cached_signing_config->config.signature_type = signing_config->signature_type; + cached_signing_config->config.config_type = + signing_config->config_type ? signing_config->config_type : AWS_SIGNING_CONFIG_AWS; AWS_ASSERT(aws_byte_cursor_is_valid(&signing_config->region)); - if (signing_config->region.len > 0) { cached_signing_config->region = aws_string_new_from_cursor(allocator, &signing_config->region); - - cached_signing_config->config.region = aws_byte_cursor_from_string(cached_signing_config->region); + } else { + /* Fall back to client region. 
*/ + cached_signing_config->region = aws_string_new_from_string(allocator, client->region); } - - AWS_ASSERT(aws_byte_cursor_is_valid(&signing_config->service)); + cached_signing_config->config.region = aws_byte_cursor_from_string(cached_signing_config->region); if (signing_config->service.len > 0) { cached_signing_config->service = aws_string_new_from_cursor(allocator, &signing_config->service); - cached_signing_config->config.service = aws_byte_cursor_from_string(cached_signing_config->service); + } else { + cached_signing_config->config.service = g_s3_service_name; } cached_signing_config->config.date = signing_config->date; - cached_signing_config->config.should_sign_header = signing_config->should_sign_header; - cached_signing_config->config.flags = signing_config->flags; - AWS_ASSERT(aws_byte_cursor_is_valid(&signing_config->signed_body_value)); - if (signing_config->service.len > 0) { + if (signing_config->signed_body_value.len > 0) { cached_signing_config->signed_body_value = aws_string_new_from_cursor(allocator, &signing_config->signed_body_value); - cached_signing_config->config.signed_body_value = aws_byte_cursor_from_string(cached_signing_config->signed_body_value); + } else { + cached_signing_config->config.signed_body_value = g_aws_signed_body_value_unsigned_payload; } - cached_signing_config->config.signed_body_header = signing_config->signed_body_header; - if (signing_config->credentials != NULL) { aws_credentials_acquire(signing_config->credentials); cached_signing_config->config.credentials = signing_config->credentials; @@ -250,6 +255,17 @@ struct aws_cached_signing_config_aws *aws_cached_signing_config_new( cached_signing_config->config.credentials_provider = signing_config->credentials_provider; } + /* Configs default to Zero. 
*/ + cached_signing_config->config.algorithm = signing_config->algorithm; + cached_signing_config->config.signature_type = signing_config->signature_type; + /* TODO: you don't have a way to override this config as the other option is zero. But, you cannot really use the + * other value, as it is always required. */ + cached_signing_config->config.signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256; + cached_signing_config->config.should_sign_header = signing_config->should_sign_header; + /* It's the user's responsibility to keep the user data around */ + cached_signing_config->config.should_sign_header_ud = signing_config->should_sign_header_ud; + + cached_signing_config->config.flags = signing_config->flags; cached_signing_config->config.expiration_in_seconds = signing_config->expiration_in_seconds; return cached_signing_config; @@ -288,31 +304,29 @@ void aws_s3_init_default_signing_config( signing_config->signed_body_value = g_aws_signed_body_value_unsigned_payload; } -void replace_quote_entities(struct aws_allocator *allocator, struct aws_string *str, struct aws_byte_buf *out_buf) { - AWS_PRECONDITION(str); - - aws_byte_buf_init(out_buf, allocator, str->len); - - struct aws_byte_cursor quote_entity = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("&quot;"); - struct aws_byte_cursor quote = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\""); +static struct aws_byte_cursor s_quote_entity_literal = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("&quot;"); +static struct aws_byte_cursor s_quote_literal = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\""); - size_t i = 0; +struct aws_byte_buf aws_replace_quote_entities(struct aws_allocator *allocator, struct aws_byte_cursor src) { + struct aws_byte_buf out_buf; + aws_byte_buf_init(&out_buf, allocator, src.len); - while (i < str->len) { - size_t chars_remaining = str->len - i; + for (size_t i = 0; i < src.len; ++i) { + size_t chars_remaining = src.len - i; - if (chars_remaining >= quote_entity.len && - !strncmp((const char *)&str->bytes[i], (const char
*)quote_entity.ptr, quote_entity.len)) { + if (chars_remaining >= s_quote_entity_literal.len && + !strncmp((const char *)&src.ptr[i], (const char *)s_quote_entity_literal.ptr, s_quote_entity_literal.len)) { /* Append quote */ - aws_byte_buf_append(out_buf, &quote); - i += quote_entity.len; + aws_byte_buf_append(&out_buf, &s_quote_literal); + i += s_quote_entity_literal.len - 1; } else { /* Append character */ - struct aws_byte_cursor character_cursor = aws_byte_cursor_from_array(&str->bytes[i], 1); - aws_byte_buf_append(out_buf, &character_cursor); - ++i; + struct aws_byte_cursor character_cursor = aws_byte_cursor_from_array(&src.ptr[i], 1); + aws_byte_buf_append(&out_buf, &character_cursor); } } + + return out_buf; } struct aws_string *aws_strip_quotes(struct aws_allocator *allocator, struct aws_byte_cursor in_cur) { @@ -325,9 +339,9 @@ struct aws_string *aws_strip_quotes(struct aws_allocator *allocator, struct aws_ return aws_string_new_from_cursor(allocator, &in_cur); } -int aws_last_error_or_unknown() { +int aws_last_error_or_unknown(void) { int error = aws_last_error(); - + AWS_ASSERT(error != AWS_ERROR_SUCCESS); /* Someone forgot to call aws_raise_error() */ if (error == AWS_ERROR_SUCCESS) { return AWS_ERROR_UNKNOWN; } @@ -339,11 +353,15 @@ void aws_s3_add_user_agent_header(struct aws_allocator *allocator, struct aws_ht AWS_PRECONDITION(allocator); AWS_PRECONDITION(message); - const struct aws_byte_cursor space_delimeter = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" "); + const struct aws_byte_cursor space_delimiter = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" "); const struct aws_byte_cursor forward_slash = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); - - const size_t user_agent_product_version_length = - g_user_agent_header_product_name.len + forward_slash.len + g_s3_client_version.len; + struct aws_byte_cursor platform_cursor = aws_s3_get_current_platform_ec2_intance_type(true /* cached_only */); + if (!platform_cursor.len) { + platform_cursor =
g_user_agent_header_unknown; + } + const size_t user_agent_length = g_user_agent_header_product_name.len + forward_slash.len + + g_s3_client_version.len + space_delimiter.len + g_user_agent_header_platform.len + + forward_slash.len + platform_cursor.len; struct aws_http_headers *headers = aws_http_message_get_headers(message); AWS_ASSERT(headers != NULL); @@ -355,23 +373,21 @@ void aws_s3_add_user_agent_header(struct aws_allocator *allocator, struct aws_ht AWS_ZERO_STRUCT(user_agent_buffer); if (aws_http_headers_get(headers, g_user_agent_header_name, &current_user_agent_header) == AWS_OP_SUCCESS) { - /* If the header was found, then create a buffer with the total size we'll need, and append the curent user + /* If the header was found, then create a buffer with the total size we'll need, and append the current user * agent header with a trailing space. */ aws_byte_buf_init( - &user_agent_buffer, - allocator, - current_user_agent_header.len + space_delimeter.len + user_agent_product_version_length); + &user_agent_buffer, allocator, current_user_agent_header.len + space_delimiter.len + user_agent_length); aws_byte_buf_append_dynamic(&user_agent_buffer, &current_user_agent_header); - aws_byte_buf_append_dynamic(&user_agent_buffer, &space_delimeter); + aws_byte_buf_append_dynamic(&user_agent_buffer, &space_delimiter); } else { AWS_ASSERT(aws_last_error() == AWS_ERROR_HTTP_HEADER_NOT_FOUND); /* If the header was not found, then create a buffer with just the size of the user agent string that is about * to be appended to the buffer. */ - aws_byte_buf_init(&user_agent_buffer, allocator, user_agent_length); + aws_byte_buf_init(&user_agent_buffer, allocator, user_agent_length); } /* Append the client's user-agent string.
*/ @@ -379,6 +395,10 @@ void aws_s3_add_user_agent_header(struct aws_allocator *allocator, struct aws_ht aws_byte_buf_append_dynamic(&user_agent_buffer, &g_user_agent_header_product_name); aws_byte_buf_append_dynamic(&user_agent_buffer, &forward_slash); aws_byte_buf_append_dynamic(&user_agent_buffer, &g_s3_client_version); + aws_byte_buf_append_dynamic(&user_agent_buffer, &space_delimiter); + aws_byte_buf_append_dynamic(&user_agent_buffer, &g_user_agent_header_platform); + aws_byte_buf_append_dynamic(&user_agent_buffer, &forward_slash); + aws_byte_buf_append_dynamic(&user_agent_buffer, &platform_cursor); } /* Apply the updated header. */ @@ -477,22 +497,98 @@ int aws_s3_parse_content_length_response_header( return result; } -uint32_t aws_s3_get_num_parts(size_t part_size, uint64_t object_range_start, uint64_t object_range_end) { - uint32_t num_parts = 1; +int aws_s3_parse_request_range_header( + struct aws_http_headers *request_headers, + bool *out_has_start_range, + bool *out_has_end_range, + uint64_t *out_start_range, + uint64_t *out_end_range) { + + AWS_PRECONDITION(request_headers); + AWS_PRECONDITION(out_has_start_range); + AWS_PRECONDITION(out_has_end_range); + AWS_PRECONDITION(out_start_range); + AWS_PRECONDITION(out_end_range); + + bool has_start_range = false; + bool has_end_range = false; + uint64_t start_range = 0; + uint64_t end_range = 0; + + struct aws_byte_cursor range_header_value; + + if (aws_http_headers_get(request_headers, g_range_header_name, &range_header_value)) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); + } - uint64_t first_part_size = part_size; - uint64_t first_part_alignment_offset = object_range_start % part_size; + struct aws_byte_cursor range_header_start = aws_byte_cursor_from_c_str("bytes="); - /* If the first part size isn't aligned on the assumed part boundary, make it smaller so that it is. 
*/ - if (first_part_alignment_offset > 0) { - first_part_size = part_size - first_part_alignment_offset; + /* verify bytes= */ + if (!aws_byte_cursor_starts_with(&range_header_value, &range_header_start)) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } + aws_byte_cursor_advance(&range_header_value, range_header_start.len); + struct aws_byte_cursor substr = {0}; + /* parse start range */ + if (!aws_byte_cursor_next_split(&range_header_value, '-', &substr)) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); + } + if (substr.len > 0) { + if (aws_byte_cursor_utf8_parse_u64(substr, &start_range)) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); + } + has_start_range = true; + } + + /* parse end range */ + if (!aws_byte_cursor_next_split(&range_header_value, '-', &substr)) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); + } + if (substr.len > 0) { + if (aws_byte_cursor_utf8_parse_u64(substr, &end_range)) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); + } + has_end_range = true; + } + + /* verify that there is nothing extra */ + if (aws_byte_cursor_next_split(&range_header_value, '-', &substr)) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); + } + + /* verify that start-range <= end-range */ + if (has_end_range && start_range > end_range) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); + } + + /* verify that start-range or end-range is present */ + if (!has_start_range && !has_end_range) { + return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); + } + + *out_has_start_range = has_start_range; + *out_has_end_range = has_end_range; + *out_start_range = start_range; + *out_end_range = end_range; + return AWS_OP_SUCCESS; +} + +uint32_t aws_s3_calculate_auto_ranged_get_num_parts( + size_t part_size, + uint64_t first_part_size, + uint64_t object_range_start, + uint64_t object_range_end) { + uint32_t num_parts = 1; + + if (first_part_size == 0) { + return num_parts; + } 
uint64_t second_part_start = object_range_start + first_part_size; /* If the range has room for a second part, calculate the additional amount of parts. */ if (second_part_start <= object_range_end) { - uint64_t aligned_range_remainder = object_range_end + 1 - second_part_start; + uint64_t aligned_range_remainder = object_range_end + 1 - second_part_start; /* range-end is inclusive */ num_parts += (uint32_t)(aligned_range_remainder / (uint64_t)part_size); if ((aligned_range_remainder % part_size) > 0) { @@ -503,10 +599,11 @@ uint32_t aws_s3_get_num_parts(size_t part_size, uint64_t object_range_start, uin return num_parts; } -void aws_s3_get_part_range( +void aws_s3_calculate_auto_ranged_get_part_range( uint64_t object_range_start, uint64_t object_range_end, size_t part_size, + uint64_t first_part_size, uint32_t part_number, uint64_t *out_part_range_start, uint64_t *out_part_range_end) { @@ -518,16 +615,11 @@ void aws_s3_get_part_range( const uint32_t part_index = part_number - 1; /* Part index is assumed to be in a valid range. */ - AWS_ASSERT(part_index < aws_s3_get_num_parts(part_size, object_range_start, object_range_end)); + AWS_ASSERT( + part_index < + aws_s3_calculate_auto_ranged_get_num_parts(part_size, first_part_size, object_range_start, object_range_end)); uint64_t part_size_uint64 = (uint64_t)part_size; - uint64_t first_part_size = part_size_uint64; - uint64_t first_part_alignment_offset = object_range_start % part_size_uint64; - - /* Shrink the part to a smaller size if need be to align to the assumed part boundary. */ - if (first_part_alignment_offset > 0) { - first_part_size = part_size_uint64 - first_part_alignment_offset; - } if (part_index == 0) { /* If this is the first part, then use the first part size. 
*/ @@ -537,7 +629,7 @@ void aws_s3_get_part_range( /* Else, find the next part by adding the object range + total number of whole parts before this one + initial * part size*/ *out_part_range_start = object_range_start + ((uint64_t)(part_index - 1)) * part_size_uint64 + first_part_size; - *out_part_range_end = *out_part_range_start + part_size_uint64 - 1; + *out_part_range_end = *out_part_range_start + part_size_uint64 - 1; /* range-end is inclusive */ } /* Cap the part's range end using the object's range end. */ @@ -546,13 +638,101 @@ void aws_s3_get_part_range( } } -int aws_s3_crt_error_code_from_server_error_code_string(const struct aws_string *error_code_string) { - if (aws_string_eq_byte_cursor(error_code_string, &g_s3_slow_down_error_code)) { +int aws_s3_calculate_optimal_mpu_part_size_and_num_parts( + uint64_t content_length, + size_t client_part_size, + uint64_t client_max_part_size, + size_t *out_part_size, + uint32_t *out_num_parts) { + + AWS_FATAL_ASSERT(out_part_size); + AWS_FATAL_ASSERT(out_num_parts); + + if (content_length == 0) { + *out_part_size = 0; + *out_num_parts = 0; + return AWS_OP_SUCCESS; + } + + uint64_t part_size_uint64 = content_length / (uint64_t)g_s3_max_num_upload_parts; + + if ((content_length % g_s3_max_num_upload_parts) > 0) { + ++part_size_uint64; + } + + if (part_size_uint64 > SIZE_MAX) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "Could not create meta request; required part size of %" PRIu64 " bytes is too large for platform.", + part_size_uint64); + + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + size_t part_size = (size_t)part_size_uint64; + + if (part_size > client_max_part_size) { + AWS_LOGF_ERROR( + AWS_LS_S3_META_REQUEST, + "Could not create meta request; required part size for request is %" PRIu64 + ", but current maximum part size is %" PRIu64, + (uint64_t)part_size, + (uint64_t)client_max_part_size); + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + if (part_size < client_part_size) { + 
part_size = client_part_size; + } + + if (content_length < part_size) { + /* When the content length is smaller than part size and larger than the threshold, we set one part + * with the whole length */ + part_size = (size_t)content_length; + } + + uint32_t num_parts = (uint32_t)(content_length / part_size); + if ((content_length % part_size) > 0) { + ++num_parts; + } + AWS_ASSERT(num_parts <= g_s3_max_num_upload_parts); + + *out_part_size = part_size; + *out_num_parts = num_parts; + return AWS_OP_SUCCESS; +} + +int aws_s3_crt_error_code_from_server_error_code_string(struct aws_byte_cursor error_code_string) { + if (aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "SlowDown")) { return AWS_ERROR_S3_SLOW_DOWN; } - if (aws_string_eq_byte_cursor(error_code_string, &g_s3_internal_error_code) || - aws_string_eq_byte_cursor(error_code_string, &g_s3_internal_errors_code)) { + if (aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "InternalError") || + aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "InternalErrors")) { return AWS_ERROR_S3_INTERNAL_ERROR; } + if (aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "RequestTimeTooSkewed")) { + return AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED; + } return AWS_ERROR_UNKNOWN; } + +void aws_s3_request_finish_up_metrics_synced(struct aws_s3_request *request, struct aws_s3_meta_request *meta_request) { + AWS_PRECONDITION(meta_request); + AWS_PRECONDITION(request); + ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); + + if (request->send_data.metrics != NULL) { + /* Request is done, complete the metrics for the request now. 
*/ + struct aws_s3_request_metrics *metrics = request->send_data.metrics; + aws_high_res_clock_get_ticks((uint64_t *)&metrics->time_metrics.end_timestamp_ns); + metrics->time_metrics.total_duration_ns = + metrics->time_metrics.end_timestamp_ns - metrics->time_metrics.start_timestamp_ns; + + if (meta_request->telemetry_callback != NULL) { + struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_TELEMETRY}; + event.u.telemetry.metrics = aws_s3_request_metrics_acquire(metrics); + aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); + } + request->send_data.metrics = aws_s3_request_metrics_release(metrics); + } +} diff --git a/contrib/restricted/aws/aws-c-s3/source/s3express_credentials_provider.c b/contrib/restricted/aws/aws-c-s3/source/s3express_credentials_provider.c new file mode 100644 index 00000000000..7b0f1002017 --- /dev/null +++ b/contrib/restricted/aws/aws-c-s3/source/s3express_credentials_provider.c @@ -0,0 +1,968 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include "aws/s3/s3express_credentials_provider.h" +#include "aws/s3/private/s3_client_impl.h" +#include "aws/s3/private/s3express_credentials_provider_impl.h" +#include <aws/auth/credentials.h> +#include <aws/s3/private/s3_util.h> +#include <aws/s3/s3_client.h> + +#include <aws/common/clock.h> +#include <aws/common/lru_cache.h> +#include <aws/common/uri.h> +#include <aws/common/xml_parser.h> +#include <aws/http/request_response.h> +#include <aws/http/status_code.h> +#include <aws/io/channel_bootstrap.h> +#include <aws/io/event_loop.h> + +#include <aws/cal/hash.h> + +#include <inttypes.h> + +static struct aws_byte_cursor s_create_session_path_query = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/?session="); +static const size_t s_default_cache_capacity = 100; + +/* Those number are from C++ SDK impl */ +static const uint64_t s_expired_threshold_secs = 5; +static const uint64_t s_about_to_expire_threshold_secs = 60; +static const uint64_t s_background_refresh_interval_secs = 60; + +struct aws_query_callback_node { + struct aws_linked_list_node node; + aws_on_get_credentials_callback_fn *get_cred_callback; + void *get_cred_user_data; +}; + +struct aws_s3express_session_creator { + struct aws_allocator *allocator; + + /* The hash key for the table storing creator and session. */ + struct aws_string *hash_key; + + struct aws_s3express_credentials_provider *provider; + struct aws_byte_buf response_buf; + + /* The region and host of the session we are creating */ + struct aws_string *region; + struct aws_string *host; + + struct { + /* Protected by the impl lock */ + + /* If creating a new session, this is NULL. + * If refreshing an existing session, this points to it. 
*/ + struct aws_s3express_session *session; + /* Node of `struct aws_query_callback_node*` */ + struct aws_linked_list query_queue; + struct aws_s3_meta_request *meta_request; + } synced_data; +}; + +static struct aws_s3express_session *s_aws_s3express_session_new( + struct aws_s3express_credentials_provider *provider, + const struct aws_string *hash_key, + const struct aws_string *region, + const struct aws_string *host, + struct aws_credentials *credentials) { + + struct aws_s3express_session *session = + aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_s3express_session)); + session->allocator = provider->allocator; + session->impl = provider->impl; + session->hash_key = aws_string_new_from_string(provider->allocator, hash_key); + session->host = aws_string_new_from_string(provider->allocator, host); + if (region) { + session->region = aws_string_new_from_string(provider->allocator, region); + } + session->s3express_credentials = credentials; + aws_credentials_acquire(credentials); + return session; +} + +static void s_aws_s3express_session_destroy(struct aws_s3express_session *session) { + if (!session) { + return; + } + if (session->creator) { + /* The session is always protected by the lock, we can safely touch the synced data here */ + /* Unset the session, but keep the creator going */ + session->creator->synced_data.session = NULL; + } + aws_string_destroy(session->hash_key); + aws_string_destroy(session->region); + aws_string_destroy(session->host); + aws_credentials_release(session->s3express_credentials); + aws_mem_release(session->allocator, session); +} + +static bool s_s3express_session_is_valid(struct aws_s3express_session *session, uint64_t now_seconds) { + AWS_ASSERT(session->s3express_credentials); + if (session->impl->mock_test.s3express_session_is_valid_override) { + /* Mock override for testing. 
*/ + return session->impl->mock_test.s3express_session_is_valid_override(session, now_seconds); + } + uint64_t expire_secs = aws_credentials_get_expiration_timepoint_seconds(session->s3express_credentials); + uint64_t threshold_secs = 0; + int overflow = aws_add_u64_checked(now_seconds, s_expired_threshold_secs, &threshold_secs); + AWS_ASSERT(!overflow); + (void)overflow; + /* If it's too close to be expired, we consider the session is invalid */ + return threshold_secs < expire_secs; +} + +static bool s_s3express_session_about_to_expire(struct aws_s3express_session *session, uint64_t now_seconds) { + AWS_ASSERT(session->s3express_credentials); + if (session->impl->mock_test.s3express_session_about_to_expire_override) { + /* Mock override for testing. */ + return session->impl->mock_test.s3express_session_about_to_expire_override(session, now_seconds); + } + uint64_t expire_secs = aws_credentials_get_expiration_timepoint_seconds(session->s3express_credentials); + uint64_t threshold_secs = 0; + int overflow = aws_add_u64_checked(now_seconds, s_about_to_expire_threshold_secs, &threshold_secs); + AWS_ASSERT(!overflow); + (void)overflow; + return threshold_secs >= expire_secs; +} + +static struct aws_s3express_session_creator *s_aws_s3express_session_creator_destroy( + struct aws_s3express_session_creator *session_creator); + +static void s_credentials_provider_s3express_impl_lock_synced_data( + struct aws_s3express_credentials_provider_impl *impl) { + int err = aws_mutex_lock(&impl->synced_data.lock); + AWS_ASSERT(!err); + (void)err; +} + +static void s_credentials_provider_s3express_impl_unlock_synced_data( + struct aws_s3express_credentials_provider_impl *impl) { + int err = aws_mutex_unlock(&impl->synced_data.lock); + AWS_ASSERT(!err); + (void)err; +} + +static int s_on_incoming_body_fn( + struct aws_s3_meta_request *meta_request, + const struct aws_byte_cursor *body, + uint64_t range_start, + void *user_data) { + (void)meta_request; + (void)range_start; + + struct 
aws_s3express_session_creator *session_creator = user_data; + return aws_byte_buf_append_dynamic(&session_creator->response_buf, body); +} + +/* parse credentials of form +<?xml version="1.0" encoding="UTF-8"?> +<CreateSessionResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <Credentials> + <SessionToken>sessionToken</SessionToken> + <SecretAccessKey>secretKey</SecretAccessKey> + <AccessKeyId>accessKeyId</AccessKeyId> + <Expiration>2023-06-26T17:33:30Z</Expiration> + </Credentials> +</CreateSessionResult> + */ + +struct aws_s3express_xml_parser_user_data { + struct aws_allocator *allocator; + struct aws_string *access_key_id; + struct aws_string *secret_access_key; + struct aws_string *session_token; + void *log_id; + uint64_t expire_timestamp_secs; +}; + +static int s_s3express_xml_traversing_credentials(struct aws_xml_node *node, void *user_data) { + + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + + struct aws_s3express_xml_parser_user_data *parser_ud = user_data; + struct aws_byte_cursor credential_data; + AWS_ZERO_STRUCT(credential_data); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SessionToken")) { + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } + parser_ud->session_token = + aws_string_new_from_array(parser_ud->allocator, credential_data.ptr, credential_data.len); + } + + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SecretAccessKey")) { + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } + parser_ud->secret_access_key = + aws_string_new_from_array(parser_ud->allocator, credential_data.ptr, credential_data.len); + } + + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AccessKeyId")) { + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } + parser_ud->access_key_id = + aws_string_new_from_array(parser_ud->allocator, credential_data.ptr, credential_data.len); + } + + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, 
"Expiration")) { + if (aws_xml_node_as_body(node, &credential_data)) { + return AWS_OP_ERR; + } + AWS_LOGF_TRACE( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): Read Expiration " PRInSTR "", + (void *)parser_ud->log_id, + AWS_BYTE_CURSOR_PRI(credential_data)); + struct aws_date_time dt; + if (aws_date_time_init_from_str_cursor(&dt, &credential_data, AWS_DATE_FORMAT_AUTO_DETECT)) { + AWS_LOGF_ERROR( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): Failed to parse Expiration " PRInSTR "", + (void *)parser_ud->log_id, + AWS_BYTE_CURSOR_PRI(credential_data)); + return AWS_OP_ERR; + } + parser_ud->expire_timestamp_secs = (uint64_t)aws_date_time_as_epoch_secs(&dt); + } + + return AWS_OP_SUCCESS; +} + +static int s_s3express_xml_traversing_CreateSessionResult_children(struct aws_xml_node *node, void *user_data) { + + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Credentials")) { + return aws_xml_node_traverse(node, s_s3express_xml_traversing_credentials, user_data); + } + + return AWS_OP_SUCCESS; +} + +static int s_s3express_xml_traversing_root(struct aws_xml_node *node, void *user_data) { + + struct aws_byte_cursor node_name = aws_xml_node_get_name(node); + if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "CreateSessionResult")) { + return aws_xml_node_traverse(node, s_s3express_xml_traversing_CreateSessionResult_children, user_data); + } + + return AWS_OP_SUCCESS; +} + +static struct aws_credentials *s_parse_s3express_xml( + struct aws_allocator *alloc, + struct aws_byte_cursor xml, + void *logging_id) { + + struct aws_credentials *credentials = NULL; + + struct aws_s3express_xml_parser_user_data user_data = { + .allocator = alloc, + .log_id = logging_id, + }; + struct aws_xml_parser_options options = { + .doc = xml, + .on_root_encountered = s_s3express_xml_traversing_root, + .user_data = &user_data, + }; + if (aws_xml_parse(alloc, &options)) { + AWS_LOGF_ERROR( + 
AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): credentials parsing failed with error %s", + logging_id, + aws_error_debug_str(aws_last_error())); + goto done; + } + if (user_data.access_key_id && user_data.secret_access_key && user_data.session_token && + user_data.expire_timestamp_secs) { + + credentials = aws_credentials_new_from_string( + alloc, + user_data.access_key_id, + user_data.secret_access_key, + user_data.session_token, + user_data.expire_timestamp_secs); + } + +done: + /* Clean up resource */ + aws_string_destroy(user_data.access_key_id); + aws_string_destroy(user_data.secret_access_key); + aws_string_destroy(user_data.session_token); + + return credentials; +} + +/* called upon completion of meta request */ +static void s_on_request_finished( + struct aws_s3_meta_request *meta_request, + const struct aws_s3_meta_request_result *meta_request_result, + void *user_data) { + (void)meta_request; + struct aws_s3express_session_creator *session_creator = user_data; + struct aws_s3express_credentials_provider_impl *impl = session_creator->provider->impl; + if (impl->mock_test.meta_request_finished_overhead) { + impl->mock_test.meta_request_finished_overhead(meta_request, meta_request_result, user_data); + } + + struct aws_linked_list pending_callbacks; + aws_linked_list_init(&pending_callbacks); + + struct aws_credentials *credentials = NULL; + int error_code = meta_request_result->error_code; + + AWS_LOGF_DEBUG( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): CreateSession call completed with http status: %d and error code %s", + (void *)session_creator->provider, + meta_request_result->response_status, + aws_error_debug_str(error_code)); + + if (error_code && meta_request_result->error_response_body && meta_request_result->error_response_body->len > 0) { + /* The Create Session failed with an error response from S3, provide a specific error code for user. 
*/ + error_code = AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED; + AWS_LOGF_ERROR( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): CreateSession call failed with http status: %d, and error response body is: %.*s", + (void *)session_creator->provider, + meta_request_result->response_status, + (int)meta_request_result->error_response_body->len, + meta_request_result->error_response_body->buffer); + } + + if (error_code == AWS_ERROR_SUCCESS) { + credentials = s_parse_s3express_xml( + session_creator->allocator, aws_byte_cursor_from_buf(&session_creator->response_buf), session_creator); + + if (!credentials) { + error_code = AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE; + AWS_LOGF_ERROR( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): failed to read credentials from document, treating as an error.", + (void *)session_creator->provider); + } + } + { /* BEGIN CRITICAL SECTION */ + s_credentials_provider_s3express_impl_lock_synced_data(impl); + aws_linked_list_swap_contents(&session_creator->synced_data.query_queue, &pending_callbacks); + aws_hash_table_remove(&impl->synced_data.session_creator_table, session_creator->hash_key, NULL, NULL); + struct aws_s3express_session *session = session_creator->synced_data.session; + if (session) { + session->creator = NULL; + if (error_code == AWS_ERROR_SUCCESS) { + /* The session already existed, just update the credentials for the session */ + aws_credentials_release(session->s3express_credentials); + session->s3express_credentials = credentials; + aws_credentials_acquire(credentials); + } else { + /* The session failed to be created, remove the session from the cache. 
*/ + aws_cache_remove(impl->synced_data.cache, session->hash_key); + } + } else if (error_code == AWS_ERROR_SUCCESS) { + /* Create a new session when we get valid credentials and put it into cache */ + session = s_aws_s3express_session_new( + session_creator->provider, + session_creator->hash_key, + session_creator->region, + session_creator->host, + credentials); + aws_cache_put(impl->synced_data.cache, session->hash_key, session); + } + + s_credentials_provider_s3express_impl_unlock_synced_data(impl); + } /* END CRITICAL SECTION */ + + /* Invoked all callbacks */ + while (!aws_linked_list_empty(&pending_callbacks)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_callbacks); + struct aws_query_callback_node *callback_node = AWS_CONTAINER_OF(node, struct aws_query_callback_node, node); + callback_node->get_cred_callback(credentials, error_code, callback_node->get_cred_user_data); + aws_mem_release(session_creator->allocator, callback_node); + } + aws_credentials_release(credentials); + s_aws_s3express_session_creator_destroy(session_creator); +} + +static struct aws_http_message *s_create_session_request_new( + struct aws_allocator *allocator, + struct aws_byte_cursor host_value) { + struct aws_http_message *request = aws_http_message_new_request(allocator); + + struct aws_http_header host_header = { + .name = g_host_header_name, + .value = host_value, + }; + + if (aws_http_message_add_header(request, host_header)) { + goto error; + } + + struct aws_http_header user_agent_header = { + .name = g_user_agent_header_name, + .value = aws_byte_cursor_from_c_str("aws-sdk-crt/s3express-credentials-provider"), + }; + if (aws_http_message_add_header(request, user_agent_header)) { + goto error; + } + + if (aws_http_message_set_request_method(request, aws_http_method_get)) { + goto error; + } + + if (aws_http_message_set_request_path(request, s_create_session_path_query)) { + goto error; + } + return request; +error: + return 
aws_http_message_release(request); +} + +/* Clean up resources that only related to one create session call */ +static struct aws_s3express_session_creator *s_aws_s3express_session_creator_destroy( + struct aws_s3express_session_creator *session_creator) { + if (session_creator == NULL) { + return NULL; + } + AWS_FATAL_ASSERT(aws_linked_list_empty(&session_creator->synced_data.query_queue)); + struct aws_s3express_credentials_provider_impl *impl = session_creator->provider->impl; + aws_s3_meta_request_release(session_creator->synced_data.meta_request); + aws_ref_count_release(&impl->internal_ref); + + aws_string_destroy(session_creator->hash_key); + aws_string_destroy(session_creator->region); + aws_string_destroy(session_creator->host); + + aws_byte_buf_clean_up(&session_creator->response_buf); + aws_mem_release(session_creator->allocator, session_creator); + return NULL; +} + +/** + * Encode the hash key to be [host_value][hash_of_credentials] + * hash_of_credentials is the sha256 of [access_key][secret_access_key] + **/ +struct aws_string *aws_encode_s3express_hash_key_new( + struct aws_allocator *allocator, + const struct aws_credentials *original_credentials, + struct aws_byte_cursor host_value) { + + struct aws_byte_buf combine_key_buf; + + /* 1. Combine access_key and secret_access_key into one buffer */ + struct aws_byte_cursor access_key = aws_credentials_get_access_key_id(original_credentials); + struct aws_byte_cursor secret_access_key = aws_credentials_get_secret_access_key(original_credentials); + aws_byte_buf_init(&combine_key_buf, allocator, access_key.len + secret_access_key.len); + aws_byte_buf_write_from_whole_cursor(&combine_key_buf, access_key); + aws_byte_buf_write_from_whole_cursor(&combine_key_buf, secret_access_key); + + /* 2. 
Get sha256 digest from the combined key */ + struct aws_byte_cursor combine_key = aws_byte_cursor_from_buf(&combine_key_buf); + struct aws_byte_buf digest_buf; + aws_byte_buf_init(&digest_buf, allocator, AWS_SHA256_LEN); + aws_sha256_compute(allocator, &combine_key, &digest_buf, 0); + + /* 3. Encode the result to be [host_value][hash_of_credentials] */ + struct aws_byte_buf result_buffer; + aws_byte_buf_init(&result_buffer, allocator, host_value.len + digest_buf.len); + aws_byte_buf_write_from_whole_cursor(&result_buffer, host_value); + aws_byte_buf_write_from_whole_buffer(&result_buffer, digest_buf); + struct aws_string *result = aws_string_new_from_buf(allocator, &result_buffer); + + /* Clean up */ + aws_byte_buf_clean_up(&result_buffer); + aws_byte_buf_clean_up(&combine_key_buf); + aws_byte_buf_clean_up(&digest_buf); + + return result; +} + +static struct aws_s3express_session_creator *s_session_creator_new( + struct aws_s3express_credentials_provider *provider, + const struct aws_credentials *original_credentials, + const struct aws_credentials_properties_s3express *s3express_properties) { + + struct aws_s3express_credentials_provider_impl *impl = provider->impl; + struct aws_http_message *request = s_create_session_request_new(provider->allocator, s3express_properties->host); + if (!request) { + return NULL; + } + if (impl->mock_test.endpoint_override) { + /* NOTE: ONLY FOR TESTS. Erase the host header for endpoint override. 
*/ + aws_http_headers_erase(aws_http_message_get_headers(request), g_host_header_name); + } + + struct aws_s3express_session_creator *session_creator = + aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_s3express_session_creator)); + session_creator->allocator = provider->allocator; + session_creator->provider = provider; + session_creator->host = aws_string_new_from_cursor(session_creator->allocator, &s3express_properties->host); + session_creator->region = aws_string_new_from_cursor(session_creator->allocator, &s3express_properties->region); + + struct aws_signing_config_aws s3express_signing_config = { + .credentials = original_credentials, + .service = g_s3express_service_name, + .region = s3express_properties->region, + }; + + aws_byte_buf_init(&session_creator->response_buf, provider->allocator, 512); + struct aws_s3_meta_request_options options = { + .message = request, + .type = AWS_S3_META_REQUEST_TYPE_DEFAULT, + .body_callback = s_on_incoming_body_fn, + .finish_callback = s_on_request_finished, + .signing_config = &s3express_signing_config, + /* Override endpoint only for tests. */ + .endpoint = impl->mock_test.endpoint_override ? 
impl->mock_test.endpoint_override : NULL, + .user_data = session_creator, + .operation_name = aws_byte_cursor_from_c_str("CreateSession"), + }; + session_creator->synced_data.meta_request = aws_s3_client_make_meta_request(impl->client, &options); + AWS_FATAL_ASSERT(session_creator->synced_data.meta_request); + aws_http_message_release(request); + aws_ref_count_acquire(&impl->internal_ref); + aws_linked_list_init(&session_creator->synced_data.query_queue); + + return session_creator; +} + +static int s_s3express_get_creds( + struct aws_s3express_credentials_provider *provider, + const struct aws_credentials *original_credentials, + const struct aws_credentials_properties_s3express *s3express_properties, + aws_on_get_credentials_callback_fn callback, + void *user_data) { + if (s3express_properties->host.len == 0) { + AWS_LOGF_ERROR( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): The host property is empty to get credentials from S3 Express", + (void *)provider); + + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + struct aws_s3express_credentials_provider_impl *impl = provider->impl; + + struct aws_hash_element *session_creator_hash_element = NULL; + int was_created = 0; + struct aws_credentials *s3express_credentials = NULL; + struct aws_byte_cursor access_key; + AWS_ZERO_STRUCT(access_key); + if (original_credentials) { + access_key = aws_credentials_get_access_key_id(original_credentials); + } + + uint64_t current_stamp = UINT64_MAX; + aws_sys_clock_get_ticks(¤t_stamp); + struct aws_string *hash_key = + aws_encode_s3express_hash_key_new(provider->allocator, original_credentials, s3express_properties->host); + uint64_t now_seconds = aws_timestamp_convert(current_stamp, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); + + s_credentials_provider_s3express_impl_lock_synced_data(impl); + /* Used after free is a crime */ + AWS_FATAL_ASSERT(!impl->synced_data.destroying); + /* Step 1: Check cache. 
*/ + struct aws_s3express_session *session = NULL; + int ret_code = aws_cache_find(impl->synced_data.cache, hash_key, (void **)&session); + AWS_ASSERT(ret_code == AWS_OP_SUCCESS); + if (session) { + /* We found a session */ + session->inactive = false; + AWS_ASSERT(session->s3express_credentials != NULL); + if (s_s3express_session_is_valid(session, now_seconds)) { + s3express_credentials = session->s3express_credentials; + /* Make sure the creds are valid until the callback invokes */ + aws_credentials_acquire(s3express_credentials); + aws_string_destroy(hash_key); + goto unlock; + } else { + /* Remove the session from cache and fall to try to creating the session */ + aws_cache_remove(impl->synced_data.cache, hash_key); + } + } + + /* Step 2: Check the creator map */ + ret_code = aws_hash_table_create( + &impl->synced_data.session_creator_table, hash_key, &session_creator_hash_element, &was_created); + AWS_ASSERT(ret_code == AWS_OP_SUCCESS); + (void)ret_code; + + /* Step 3: Create session if needed */ + if (was_created) { + /* A new session creator needed */ + struct aws_s3express_session_creator *new_session_creator = + s_session_creator_new(provider, original_credentials, s3express_properties); + /* If we failed to create session creator, it's probably OOM or impl error we don't want to handle */ + AWS_FATAL_ASSERT(new_session_creator); + new_session_creator->hash_key = hash_key; + session_creator_hash_element->value = new_session_creator; + } else { + aws_string_destroy(hash_key); + } + + if (s3express_credentials == NULL) { + /* Queue the callback if we don't have a creds to return now. 
*/ + struct aws_s3express_session_creator *session_creator = session_creator_hash_element->value; + struct aws_query_callback_node *callback_node = + aws_mem_acquire(provider->allocator, sizeof(struct aws_query_callback_node)); + callback_node->get_cred_callback = callback; + callback_node->get_cred_user_data = user_data; + aws_linked_list_push_back(&session_creator->synced_data.query_queue, &callback_node->node); + } +unlock: + s_credentials_provider_s3express_impl_unlock_synced_data(impl); + if (s3express_credentials) { + uint64_t expire_secs = aws_credentials_get_expiration_timepoint_seconds(s3express_credentials); + AWS_LOGF_TRACE( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): Found credentials from cache. Timestamp to expire is %" PRIu64 ", while now is %" PRIu64 ".", + (void *)provider, + expire_secs, + now_seconds); + /* TODO: invoke callback asynced? */ + callback(s3express_credentials, AWS_ERROR_SUCCESS, user_data); + aws_credentials_release(s3express_credentials); + return AWS_OP_SUCCESS; + } + return AWS_OP_SUCCESS; +} + +static void s_finish_provider_destroy(struct aws_s3express_credentials_provider *provider) { + AWS_LOGF_TRACE( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): finishing destroying S3 Express credentials provider", + (void *)provider); + + struct aws_s3express_credentials_provider_impl *impl = provider->impl; + aws_hash_table_clean_up(&impl->synced_data.session_creator_table); + aws_cache_destroy(impl->synced_data.cache); + aws_credentials_release(impl->default_original_credentials); + aws_credentials_provider_release(impl->default_original_credentials_provider); + aws_mutex_clean_up(&impl->synced_data.lock); + aws_mem_release(provider->allocator, impl->bg_refresh_task); + /* Invoke provider shutdown callback */ + if (provider && provider->shutdown_complete_callback) { + provider->shutdown_complete_callback(provider->shutdown_user_data); + } + aws_mem_release(provider->allocator, provider); +} + +/* This is scheduled to run on the 
background task's event loop. */ +static void s_clean_up_background_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)status; + struct aws_s3express_credentials_provider *provider = arg; + struct aws_s3express_credentials_provider_impl *impl = provider->impl; + + /* Cancel the task will run the task synchronously */ + aws_event_loop_cancel_task(impl->bg_event_loop, impl->bg_refresh_task); + aws_mem_release(provider->allocator, task); + + /* Safely remove the internal ref as the background task is killed. */ + aws_ref_count_release(&impl->internal_ref); +} + +static void s_external_destroy(struct aws_s3express_credentials_provider *provider) { + AWS_LOGF_TRACE( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "(id=%p): external refcount drops to zero, start destroying", + (void *)provider); + + struct aws_s3express_credentials_provider_impl *impl = provider->impl; + { /* BEGIN CRITICAL SECTION */ + s_credentials_provider_s3express_impl_lock_synced_data(impl); + impl->synced_data.destroying = true; + aws_cache_clear(impl->synced_data.cache); + for (struct aws_hash_iter iter = aws_hash_iter_begin(&impl->synced_data.session_creator_table); + !aws_hash_iter_done(&iter); + aws_hash_iter_next(&iter)) { + struct aws_s3express_session_creator *session_creator = + (struct aws_s3express_session_creator *)iter.element.value; + /* Cancel all meta requests */ + aws_s3_meta_request_cancel(session_creator->synced_data.meta_request); + } + s_credentials_provider_s3express_impl_unlock_synced_data(impl); + } /* END CRITICAL SECTION */ + + /* Clean up the background thread */ + struct aws_task *clean_up_background_task = aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_task)); + aws_task_init(clean_up_background_task, s_clean_up_background_task, provider, "clean_up_s3express_background"); + aws_event_loop_schedule_task_now(impl->bg_event_loop, clean_up_background_task); +} + +static struct aws_s3express_credentials_provider_vtable 
s_aws_s3express_credentials_provider_vtable = { + .get_credentials = s_s3express_get_creds, + .destroy = s_external_destroy, +}; + +static void s_schedule_bg_refresh(struct aws_s3express_credentials_provider *provider) { + struct aws_s3express_credentials_provider_impl *impl = provider->impl; + + AWS_FATAL_ASSERT(impl->bg_event_loop != NULL); + uint64_t current_stamp = UINT64_MAX; + /* Use high res clock to schedule the task in the future. */ + aws_high_res_clock_get_ticks(¤t_stamp); + uint64_t interval_secs = impl->mock_test.bg_refresh_secs_override == 0 ? s_background_refresh_interval_secs + : impl->mock_test.bg_refresh_secs_override; + + /* Schedule the refresh task to happen in the future. */ + aws_event_loop_schedule_task_future( + impl->bg_event_loop, + impl->bg_refresh_task, + current_stamp + aws_timestamp_convert(interval_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); + + return; +} + +static void s_refresh_session_list( + struct aws_s3express_credentials_provider *provider, + const struct aws_credentials *current_original_credentials) { + + struct aws_s3express_credentials_provider_impl *impl = provider->impl; + uint64_t current_stamp = UINT64_MAX; + aws_sys_clock_get_ticks(¤t_stamp); + uint64_t now_seconds = aws_timestamp_convert(current_stamp, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); + AWS_LOGF_TRACE( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): background refreshing task in process", (void *)provider); + { /* BEGIN CRITICAL SECTION */ + s_credentials_provider_s3express_impl_lock_synced_data(impl); + if (impl->synced_data.destroying) { + /* Client is gone, stops doing anything */ + s_credentials_provider_s3express_impl_unlock_synced_data(impl); + return; + } + const struct aws_linked_list *session_list = + aws_linked_hash_table_get_iteration_list(&impl->synced_data.cache->table); + /* Iterate through the cache without changing the priority */ + + struct aws_linked_list_node *node = NULL; + for (node = aws_linked_list_begin(session_list); 
node != aws_linked_list_end(session_list);) { + /* Iterate through all nodes and clean the resource up */ + struct aws_linked_hash_table_node *table_node = + AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); + node = aws_linked_list_next(node); + struct aws_s3express_session *session = table_node->value; + if (s_s3express_session_about_to_expire(session, now_seconds)) { + if (session->inactive) { + /* The session has been inactive since last refresh, remove it from the cache. */ + aws_cache_remove(impl->synced_data.cache, session->hash_key); + } else { + /* If we are about to expire, try to refresh the credentials */ + /* Check the creator map */ + struct aws_hash_element *session_creator_hash_element = NULL; + int was_created = 0; + struct aws_string *hash_key = aws_string_new_from_string(provider->allocator, session->hash_key); + int ret_code = aws_hash_table_create( + &impl->synced_data.session_creator_table, + hash_key, + &session_creator_hash_element, + &was_created); + AWS_ASSERT(ret_code == AWS_OP_SUCCESS); + (void)ret_code; + if (was_created) { + struct aws_string *current_creds_hash = aws_encode_s3express_hash_key_new( + provider->allocator, + current_original_credentials, + aws_byte_cursor_from_string(session->host)); + bool creds_match = aws_string_eq(current_creds_hash, hash_key); + aws_string_destroy(current_creds_hash); + if (!creds_match) { + /* The session was created with a separate credentials, we skip refreshing it. */ + if (!s_s3express_session_is_valid(session, now_seconds)) { + /* Purge the session when it is expired. */ + aws_cache_remove(impl->synced_data.cache, session->hash_key); + } + /* Mark it as inactive, so that we can purge the session directly from next refresh */ + session->inactive = true; + /* Remove the element we just created as we skip refrshing. 
*/ + aws_string_destroy(hash_key); + aws_hash_table_remove_element( + &impl->synced_data.session_creator_table, session_creator_hash_element); + goto unlock; + } + + struct aws_credentials_properties_s3express s3express_properties = { + .host = aws_byte_cursor_from_string(session->host), + }; + if (session->region) { + s3express_properties.region = aws_byte_cursor_from_string(session->region); + } + /* A new session creator needed to refresh the session */ + struct aws_s3express_session_creator *new_session_creator = + s_session_creator_new(provider, current_original_credentials, &s3express_properties); + AWS_FATAL_ASSERT(new_session_creator); + new_session_creator->synced_data.session = session; + session->creator = new_session_creator; + new_session_creator->hash_key = hash_key; + + session_creator_hash_element->value = new_session_creator; + } else { + /* The session is in process of refreshing. Only valid if the previous create session to + * refresh still not finished, otherwise, it's a bug */ + aws_string_destroy(hash_key); + struct aws_s3express_session_creator *session_creator = session_creator_hash_element->value; + AWS_FATAL_ASSERT(session_creator->synced_data.session == session); + } + session->inactive = true; + } + } + } + unlock: + s_credentials_provider_s3express_impl_unlock_synced_data(impl); + } /* END CRITICAL SECTION */ + s_schedule_bg_refresh(provider); +} + +static void s_get_original_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { + struct aws_s3express_credentials_provider *provider = user_data; + if (error_code) { + AWS_LOGF_DEBUG( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "id=%p: S3 Express Provider back ground refresh failed: Failed to fetch original credentials with " + "error %s. 
Skipping refresh.", + (void *)provider, + aws_error_debug_str(aws_last_error())); + /* Skip this refresh, but keep schedule the next one */ + s_schedule_bg_refresh(provider); + return; + } + s_refresh_session_list(provider, credentials); +} + +static void s_bg_refresh_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + if (status != AWS_TASK_STATUS_RUN_READY) { + return; + } + + struct aws_s3express_credentials_provider *provider = arg; + struct aws_s3express_credentials_provider_impl *impl = provider->impl; + if (impl->default_original_credentials) { + s_refresh_session_list(provider, impl->default_original_credentials); + } else { + /* Get the credentials from provider first. */ + if (aws_credentials_provider_get_credentials( + impl->default_original_credentials_provider, s_get_original_credentials_callback, provider)) { + AWS_LOGF_DEBUG( + AWS_LS_AUTH_CREDENTIALS_PROVIDER, + "id=%p: S3 Express Provider back ground refresh failed: Failed to get original credentials from " + "provider with error %s. 
Skipping refresh.", + (void *)provider, + aws_error_debug_str(aws_last_error())); + /* Skip this refresh, but keep schedule the next one */ + s_schedule_bg_refresh(provider); + return; + } + } +} + +void aws_s3express_credentials_provider_init_base( + struct aws_s3express_credentials_provider *provider, + struct aws_allocator *allocator, + struct aws_s3express_credentials_provider_vtable *vtable, + void *impl) { + + AWS_PRECONDITION(provider); + AWS_PRECONDITION(vtable); + + provider->allocator = allocator; + provider->vtable = vtable; + provider->impl = impl; + aws_ref_count_init(&provider->ref_count, provider, (aws_simple_completion_callback *)provider->vtable->destroy); +} + +struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_new_default( + struct aws_allocator *allocator, + const struct aws_s3express_credentials_provider_default_options *options) { + + if (!options->client) { + AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a S3 client is necessary for querying S3 Express"); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + + struct aws_s3express_credentials_provider *provider = NULL; + struct aws_s3express_credentials_provider_impl *impl = NULL; + + aws_mem_acquire_many( + allocator, + 2, + &provider, + sizeof(struct aws_s3express_credentials_provider), + &impl, + sizeof(struct aws_s3express_credentials_provider_impl)); + + AWS_LOGF_DEBUG(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: creating S3 Express credentials provider"); + AWS_ZERO_STRUCT(*provider); + AWS_ZERO_STRUCT(*impl); + + aws_s3express_credentials_provider_init_base( + provider, allocator, &s_aws_s3express_credentials_provider_vtable, impl); + + aws_hash_table_init( + &impl->synced_data.session_creator_table, + allocator, + 10, + aws_hash_string, + aws_hash_callback_string_eq, + NULL, + NULL); + + impl->synced_data.cache = aws_cache_new_lru( + allocator, + aws_hash_string, + (aws_hash_callback_eq_fn *)aws_string_eq, + NULL, + 
(aws_hash_callback_destroy_fn *)s_aws_s3express_session_destroy, + s_default_cache_capacity); + AWS_ASSERT(impl->synced_data.cache); + + /* Not keep the s3 client alive to avoid recursive reference */ + impl->client = options->client; + struct aws_signing_config_aws client_cached_config = impl->client->cached_signing_config->config; + if (client_cached_config.credentials) { + impl->default_original_credentials = client_cached_config.credentials; + aws_credentials_acquire(impl->default_original_credentials); + } else { + impl->default_original_credentials_provider = + aws_credentials_provider_acquire(client_cached_config.credentials_provider); + } + + provider->shutdown_complete_callback = options->shutdown_complete_callback; + provider->shutdown_user_data = options->shutdown_user_data; + aws_mutex_init(&impl->synced_data.lock); + aws_ref_count_init(&impl->internal_ref, provider, (aws_simple_completion_callback *)s_finish_provider_destroy); + + /* Init the background refresh task */ + impl->bg_refresh_task = aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_task)); + aws_task_init(impl->bg_refresh_task, s_bg_refresh_task, provider, "s3express_background_refresh"); + /* Get an event loop from the client */ + impl->bg_event_loop = aws_event_loop_group_get_next_loop(impl->client->client_bootstrap->event_loop_group); + impl->mock_test.bg_refresh_secs_override = options->mock_test.bg_refresh_secs_override; + s_schedule_bg_refresh(provider); + + return provider; +} + +struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_release( + struct aws_s3express_credentials_provider *provider) { + if (provider) { + aws_ref_count_release(&provider->ref_count); + } + return NULL; +} + +int aws_s3express_credentials_provider_get_credentials( + struct aws_s3express_credentials_provider *provider, + const struct aws_credentials *original_credentials, + const struct aws_credentials_properties_s3express *property, + aws_on_get_credentials_callback_fn 
callback, + void *user_data) { + + AWS_PRECONDITION(property); + AWS_PRECONDITION(provider); + AWS_ASSERT(provider->vtable->get_credentials); + + return provider->vtable->get_credentials(provider, original_credentials, property, callback, user_data); +} diff --git a/contrib/restricted/aws/aws-c-s3/ya.make b/contrib/restricted/aws/aws-c-s3/ya.make index f67086f61c1..a4858aefd2b 100644 --- a/contrib/restricted/aws/aws-c-s3/ya.make +++ b/contrib/restricted/aws/aws-c-s3/ya.make @@ -1,4 +1,4 @@ -# Generated by devtools/yamaker from nixpkgs 23.05. +# Generated by devtools/yamaker from nixpkgs 24.05. LIBRARY() @@ -6,9 +6,9 @@ LICENSE(Apache-2.0) LICENSE_TEXTS(.yandex_meta/licenses.list.txt) -VERSION(0.2.8) +VERSION(0.5.7) -ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-s3/archive/v0.2.8.tar.gz) +ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-s3/archive/v0.5.7.tar.gz) PEERDIR( contrib/restricted/aws/aws-c-auth @@ -41,24 +41,14 @@ CFLAGS( -DAWS_USE_EPOLL -DCJSON_HIDE_SYMBOLS -DHAVE_SYSCONF - -DS2N_CLONE_SUPPORTED - -DS2N_CPUID_AVAILABLE - -DS2N_FALL_THROUGH_SUPPORTED - -DS2N_FEATURES_AVAILABLE - -DS2N_KYBER512R3_AVX2_BMI2 - -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH - -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX - -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4 - -DS2N_MADVISE_SUPPORTED - -DS2N_PLATFORM_SUPPORTS_KTLS - -DS2N_STACKTRACE - -DS2N___RESTRICT__SUPPORTED + -DINTEL_NO_ITTNOTIFY_API ) SRCS( source/s3.c source/s3_auto_ranged_get.c source/s3_auto_ranged_put.c + source/s3_buffer_pool.c source/s3_checksum_stream.c source/s3_checksums.c source/s3_chunk_stream.c @@ -70,9 +60,12 @@ SRCS( source/s3_list_parts.c source/s3_meta_request.c source/s3_paginator.c + source/s3_parallel_input_stream.c + source/s3_platform_info.c source/s3_request.c source/s3_request_messages.c source/s3_util.c + source/s3express_credentials_provider.c ) END() |